430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462114622146231462414625146261462714628146291463014631146321463314634146351463614637146381463914640146411464214643146441464514646146471464814649146501465114652146531465414655146561465714658146591466014661146621466314664146651466614667146681466914670146711467214673146741467514676146771467814679146801468114682146831468414685146861468714688146891469014691146921469314694146951469614697146981469914700147011470214703147041470514706147071470814709147101471114712147131471414715147161471714718147191472014721147221472314724147251472614727147281472914730147311473214733147341473514736147371473814739147401474114742147431474414745147461474714748147491475014751147521475314754147551475614757147581475914760147611476214763147641476514766147671476814769147701477114772147731477414775147761477714778147791478014781147821478314784147851478614787147881478914790147911479214793147941479514796147971479814799148001480114802148031480414805148061480714808148091481014811148121481314814148151481614817148181481914820148211482214823148241482514826148271482814829148301483114832148331483414835148361483714838148391484014841148421484314844148451484614847148481484914850148511485214853148541485514856148571485814859148601486114862148631486414865148661486714868148691487014871148721487314874148751487614877148781487914880148811488214883148841488514886148871488814889148901489114892148931489414895148961489714898148991490014901149021490314904149051490614907149081490914910149111491214913149141491514916149171491814919149201492114922149231492414925149261492714928149291493014931149321493314934149351493614937149381493914940149411494214943149441494514946149471494814949149501495114952149531495414955149561495714958149591496014961149621496314964149651496614967149681496914970149711497214973149741497514976149771497814979149801498114982149831498414985149861498714988149891499014991149921499314994149951499614997149981499915000150011500215003150041500515006150071500815009150101501115012150131501415015150161501715018150191
502015021150221502315024150251502615027150281502915030150311503215033150341503515036150371503815039150401504115042150431504415045150461504715048150491505015051150521505315054150551505615057150581505915060150611506215063150641506515066150671506815069150701507115072150731507415075150761507715078150791508015081150821508315084150851508615087150881508915090150911509215093150941509515096150971509815099151001510115102151031510415105151061510715108151091511015111151121511315114151151511615117151181511915120151211512215123151241512515126151271512815129151301513115132151331513415135151361513715138151391514015141151421514315144151451514615147151481514915150151511515215153151541515515156151571515815159151601516115162151631516415165151661516715168151691517015171151721517315174151751517615177151781517915180151811518215183151841518515186151871518815189151901519115192151931519415195151961519715198151991520015201152021520315204152051520615207152081520915210152111521215213152141521515216152171521815219152201522115222152231522415225152261522715228152291523015231152321523315234152351523615237152381523915240152411524215243152441524515246152471524815249152501525115252152531525415255152561525715258152591526015261152621526315264152651526615267152681526915270152711527215273152741527515276152771527815279152801528115282152831528415285152861528715288152891529015291152921529315294152951529615297152981529915300153011530215303153041530515306153071530815309153101531115312153131531415315153161531715318153191532015321153221532315324153251532615327153281532915330153311533215333153341533515336153371533815339153401534115342153431534415345153461534715348153491535015351153521535315354153551535615357153581535915360153611536215363153641536515366153671536815369153701537115372153731537415375153761537715378153791538015381153821538315384153851538615387153881538915390153911539215393153941539515396153971539815399154001540115402154031540415405154061540715408154091541015411154121541315414154151541615417154181541915420154211542215423154241542515426154271542815429154301543115432154331543415435154361543715438154391544015441154421544315444154451544615447154481544915450154511545215453154541545515456154571545815459154601546115462154631546415465154661546715468154691547015471154721547315474154751547615477154781547915480154811548215483154841548515486154871548815489154901549115492154931549415495154961549715498154991550015501155021550315504155051550615507155081550915510155111551215513155141551515516155171551815519155201552115522155231552415525155261552715528155291553015531155321553315534155351553615537155381553915540155411554215543155441554515546155471554815549155501555115552155531555415555155561555715558155591556015561155621556315564155651556615567155681556915570155711557215573155741557515576155771557815579155801558115582155831558415585155861558715588155891559015591155921559315594155951559615597155981559915600156011560215603156041560515606156071560815609156101561115612156131561415615156161561715618156191562015621156221562315624156251562615627156281562915630156311563215633156341563515636156371563815639156401564115642156431564415645156461564715648156491565015651156521565315654156551565615657156581565915660156611566215663156641566515666156671566815669156701567115672156731567415675156761567715678156791568015681156821568315684156851568615687156881568915690156911569215693156941569515696156971569815699157001570115702157031570415705157061570715708157091571015711157121571315714157151571615717157181571915720157211572215723157241572515726157271572815729157301
573115732157331573415735157361573715738157391574015741157421574315744157451574615747157481574915750157511575215753157541575515756157571575815759157601576115762157631576415765157661576715768157691577015771157721577315774157751577615777157781577915780157811578215783157841578515786157871578815789157901579115792157931579415795157961579715798157991580015801158021580315804158051580615807158081580915810158111581215813158141581515816158171581815819158201582115822158231582415825158261582715828158291583015831158321583315834158351583615837158381583915840158411584215843158441584515846158471584815849158501585115852158531585415855158561585715858158591586015861158621586315864158651586615867158681586915870158711587215873158741587515876158771587815879158801588115882158831588415885158861588715888158891589015891158921589315894158951589615897158981589915900159011590215903159041590515906159071590815909159101591115912159131591415915159161591715918159191592015921159221592315924159251592615927159281592915930159311593215933159341593515936159371593815939159401594115942159431594415945159461594715948159491595015951159521595315954159551595615957159581595915960159611596215963159641596515966159671596815969159701597115972159731597415975159761597715978159791598015981159821598315984159851598615987159881598915990159911599215993159941599515996159971599815999160001600116002160031600416005160061600716008160091601016011160121601316014160151601616017160181601916020160211602216023160241602516026160271602816029160301603116032160331603416035160361603716038160391604016041160421604316044160451604616047160481604916050160511605216053160541605516056160571605816059160601606116062160631606416065160661606716068160691607016071160721607316074160751607616077160781607916080160811608216083160841608516086160871608816089160901609116092160931609416095160961609716098160991610016101161021610316104161051610616107161081610916110161111611216113161141611516116161171611816119161201612116122161231612416125161261612716128161291613016131161321613316134161351613616137161381613916140161411614216143161441614516146161471614816149161501615116152161531615416155161561615716158161591616016161161621616316164161651616616167161681616916170161711617216173161741617516176161771617816179161801618116182161831618416185161861618716188161891619016191161921619316194161951619616197161981619916200162011620216203162041620516206162071620816209162101621116212162131621416215162161621716218162191622016221162221622316224162251622616227162281622916230162311623216233162341623516236162371623816239162401624116242162431624416245162461624716248162491625016251162521625316254162551625616257162581625916260162611626216263162641626516266162671626816269162701627116272162731627416275162761627716278162791628016281162821628316284162851628616287162881628916290162911629216293162941629516296162971629816299163001630116302163031630416305163061630716308163091631016311163121631316314163151631616317163181631916320163211632216323163241632516326163271632816329163301633116332163331633416335163361633716338163391634016341163421634316344163451634616347163481634916350163511635216353163541635516356163571635816359163601636116362163631636416365163661636716368163691637016371163721637316374163751637616377163781637916380163811638216383163841638516386163871638816389163901639116392163931639416395163961639716398163991640016401164021640316404164051640616407164081640916410164111641216413164141641516416164171641816419164201642116422164231642416425164261642716428164291643016431164321643316434164351643616437164381643916440164411
644216443164441644516446164471644816449164501645116452164531645416455164561645716458164591646016461164621646316464164651646616467164681646916470164711647216473164741647516476164771647816479164801648116482164831648416485164861648716488164891649016491164921649316494164951649616497164981649916500165011650216503165041650516506165071650816509165101651116512165131651416515165161651716518165191652016521165221652316524165251652616527165281652916530165311653216533165341653516536165371653816539165401654116542165431654416545165461654716548165491655016551165521655316554165551655616557165581655916560165611656216563165641656516566165671656816569165701657116572165731657416575165761657716578165791658016581165821658316584165851658616587165881658916590165911659216593165941659516596165971659816599166001660116602166031660416605166061660716608166091661016611166121661316614166151661616617166181661916620166211662216623166241662516626166271662816629166301663116632166331663416635166361663716638166391664016641166421664316644166451664616647166481664916650166511665216653166541665516656166571665816659166601666116662166631666416665166661666716668166691667016671166721667316674166751667616677166781667916680166811668216683166841668516686166871668816689166901669116692166931669416695166961669716698166991670016701167021670316704167051670616707167081670916710167111671216713167141671516716167171671816719167201672116722167231672416725167261672716728167291673016731167321673316734167351673616737167381673916740167411674216743167441674516746167471674816749167501675116752167531675416755167561675716758167591676016761167621676316764167651676616767167681676916770167711677216773167741677516776167771677816779167801678116782167831678416785167861678716788167891679016791167921679316794167951679616797167981679916800168011680216803168041680516806168071680816809168101681116812168131681416815168161681716818168191682016821168221682316824168251682616827168281682916830168311683216833168341683516836168371683816839168401684116842168431684416845168461684716848168491685016851168521685316854168551685616857168581685916860168611686216863168641686516866168671686816869168701687116872168731687416875168761687716878168791688016881168821688316884168851688616887168881688916890168911689216893168941689516896168971689816899169001690116902169031690416905169061690716908169091691016911169121691316914169151691616917169181691916920169211692216923169241692516926169271692816929169301693116932169331693416935169361693716938169391694016941169421694316944169451694616947169481694916950169511695216953169541695516956169571695816959169601696116962169631696416965169661696716968169691697016971169721697316974169751697616977169781697916980169811698216983169841698516986169871698816989169901699116992169931699416995169961699716998169991700017001170021700317004170051700617007170081700917010170111701217013170141701517016170171701817019170201702117022170231702417025170261702717028170291703017031170321703317034170351703617037170381703917040170411704217043170441704517046170471704817049170501705117052170531705417055170561705717058170591706017061170621706317064170651706617067170681706917070170711707217073170741707517076170771707817079170801708117082170831708417085170861708717088170891709017091170921709317094170951709617097170981709917100171011710217103171041710517106171071710817109171101711117112171131711417115171161711717118171191712017121171221712317124171251712617127171281712917130171311713217133171341713517136171371713817139171401714117142171431714417145171461714717148171491715017151171521
715317154171551715617157171581715917160171611716217163171641716517166171671716817169171701717117172171731717417175171761717717178171791718017181171821718317184171851718617187171881718917190171911719217193171941719517196171971719817199172001720117202172031720417205172061720717208172091721017211172121721317214172151721617217172181721917220172211722217223172241722517226172271722817229172301723117232172331723417235172361723717238172391724017241172421724317244172451724617247172481724917250172511725217253172541725517256172571725817259172601726117262172631726417265172661726717268172691727017271172721727317274172751727617277172781727917280172811728217283172841728517286172871728817289172901729117292172931729417295172961729717298172991730017301173021730317304173051730617307173081730917310173111731217313173141731517316173171731817319173201732117322173231732417325173261732717328173291733017331173321733317334173351733617337173381733917340173411734217343173441734517346173471734817349173501735117352173531735417355173561735717358173591736017361173621736317364173651736617367173681736917370173711737217373173741737517376173771737817379173801738117382173831738417385173861738717388173891739017391173921739317394173951739617397173981739917400174011740217403174041740517406174071740817409174101741117412174131741417415174161741717418174191742017421174221742317424174251742617427174281742917430174311743217433174341743517436174371743817439174401744117442174431744417445174461744717448174491745017451174521745317454174551745617457174581745917460174611746217463174641746517466174671746817469174701747117472174731747417475174761747717478174791748017481174821748317484174851748617487174881748917490174911749217493174941749517496174971749817499175001750117502175031750417505175061750717508175091751017511175121751317514175151751617517175181751917520175211752217523175241752517526175271752817529175301753117532175331753417535175361753717538175391754017541175421754317544175451754617547175481754917550175511755217553175541755517556175571755817559175601756117562175631756417565175661756717568175691757017571175721757317574175751757617577175781757917580175811758217583175841758517586175871758817589175901759117592175931759417595175961759717598175991760017601176021760317604176051760617607176081760917610176111761217613176141761517616176171761817619176201762117622176231762417625176261762717628176291763017631176321763317634176351763617637176381763917640176411764217643176441764517646176471764817649176501765117652176531765417655176561765717658176591766017661176621766317664176651766617667176681766917670176711767217673176741767517676176771767817679176801768117682176831768417685176861768717688176891769017691176921769317694176951769617697176981769917700177011770217703177041770517706177071770817709177101771117712177131771417715177161771717718177191772017721177221772317724177251772617727177281772917730177311773217733177341773517736177371773817739177401774117742177431774417745177461774717748177491775017751177521775317754177551775617757177581775917760177611776217763177641776517766177671776817769177701777117772177731777417775177761777717778177791778017781177821778317784177851778617787177881778917790177911779217793177941779517796177971779817799178001780117802178031780417805178061780717808178091781017811178121781317814178151781617817178181781917820178211782217823178241782517826178271782817829178301783117832178331783417835178361783717838178391784017841178421784317844178451784617847178481784917850178511785217853178541785517856178571785817859178601786117862178631
786417865178661786717868178691787017871178721787317874178751787617877178781787917880178811788217883178841788517886178871788817889178901789117892178931789417895178961789717898178991790017901179021790317904179051790617907179081790917910179111791217913179141791517916179171791817919179201792117922179231792417925179261792717928179291793017931179321793317934179351793617937179381793917940179411794217943179441794517946179471794817949179501795117952179531795417955179561795717958179591796017961179621796317964179651796617967179681796917970179711797217973179741797517976179771797817979179801798117982179831798417985179861798717988179891799017991179921799317994179951799617997179981799918000180011800218003180041800518006180071800818009180101801118012180131801418015180161801718018180191802018021180221802318024180251802618027180281802918030180311803218033180341803518036180371803818039180401804118042180431804418045180461804718048180491805018051180521805318054180551805618057180581805918060180611806218063180641806518066180671806818069180701807118072180731807418075180761807718078180791808018081180821808318084180851808618087180881808918090180911809218093180941809518096180971809818099181001810118102181031810418105181061810718108181091811018111181121811318114181151811618117181181811918120181211812218123181241812518126181271812818129181301813118132181331813418135181361813718138181391814018141181421814318144181451814618147181481814918150181511815218153181541815518156181571815818159181601816118162181631816418165181661816718168181691817018171181721817318174181751817618177181781817918180181811818218183181841818518186181871818818189181901819118192181931819418195181961819718198181991820018201182021820318204182051820618207182081820918210182111821218213182141821518216182171821818219182201822118222182231822418225182261822718228182291823018231182321823318234182351823618237182381823918240182411824218243182441824518246182471824818249182501825118252182531825418255182561825718258182591826018261182621826318264182651826618267182681826918270182711827218273182741827518276182771827818279182801828118282182831828418285182861828718288182891829018291182921829318294182951829618297182981829918300183011830218303183041830518306183071830818309183101831118312183131831418315183161831718318183191832018321183221832318324183251832618327183281832918330183311833218333183341833518336183371833818339183401834118342183431834418345183461834718348183491835018351183521835318354183551835618357183581835918360183611836218363183641836518366183671836818369183701837118372183731837418375183761837718378183791838018381183821838318384183851838618387183881838918390183911839218393183941839518396183971839818399184001840118402184031840418405184061840718408184091841018411184121841318414184151841618417184181841918420184211842218423184241842518426184271842818429184301843118432184331843418435184361843718438184391844018441184421844318444184451844618447184481844918450184511845218453184541845518456184571845818459184601846118462184631846418465184661846718468184691847018471184721847318474184751847618477184781847918480184811848218483184841848518486184871848818489184901849118492184931849418495184961849718498184991850018501185021850318504185051850618507185081850918510185111851218513185141851518516185171851818519185201852118522185231852418525185261852718528185291853018531185321853318534185351853618537185381853918540185411854218543185441854518546185471854818549185501855118552185531855418555185561855718558185591856018561185621856318564185651856618567185681856918570185711857218573185741
857518576185771857818579185801858118582185831858418585185861858718588185891859018591185921859318594185951859618597185981859918600186011860218603186041860518606186071860818609186101861118612186131861418615186161861718618186191862018621186221862318624186251862618627186281862918630186311863218633186341863518636186371863818639186401864118642186431864418645186461864718648186491865018651186521865318654186551865618657186581865918660186611866218663186641866518666186671866818669186701867118672186731867418675186761867718678186791868018681186821868318684186851868618687186881868918690186911869218693186941869518696186971869818699187001870118702187031870418705187061870718708187091871018711187121871318714187151871618717187181871918720187211872218723187241872518726187271872818729187301873118732187331873418735187361873718738187391874018741187421874318744187451874618747187481874918750187511875218753187541875518756187571875818759187601876118762187631876418765187661876718768187691877018771187721877318774187751877618777187781877918780187811878218783187841878518786187871878818789187901879118792187931879418795187961879718798187991880018801188021880318804188051880618807188081880918810188111881218813188141881518816188171881818819188201882118822188231882418825188261882718828188291883018831188321883318834188351883618837188381883918840188411884218843188441884518846188471884818849188501885118852188531885418855188561885718858188591886018861188621886318864188651886618867188681886918870188711887218873188741887518876188771887818879188801888118882188831888418885188861888718888188891889018891188921889318894188951889618897188981889918900189011890218903189041890518906189071890818909189101891118912189131891418915189161891718918189191892018921189221892318924189251892618927189281892918930189311893218933189341893518936189371893818939189401894118942189431894418945189461894718948189491895018951189521895318954189551895618957189581895918960189611896218963189641896518966189671896818969189701897118972189731897418975189761897718978189791898018981189821898318984189851898618987189881898918990189911899218993189941899518996189971899818999190001900119002190031900419005190061900719008190091901019011190121901319014190151901619017190181901919020190211902219023190241902519026190271902819029190301903119032190331903419035190361903719038190391904019041190421904319044190451904619047190481904919050190511905219053190541905519056190571905819059190601906119062190631906419065190661906719068190691907019071190721907319074190751907619077190781907919080190811908219083190841908519086190871908819089190901909119092190931909419095190961909719098190991910019101191021910319104191051910619107191081910919110191111911219113191141911519116191171911819119191201912119122191231912419125191261912719128191291913019131191321913319134191351913619137191381913919140191411914219143191441914519146191471914819149191501915119152191531915419155191561915719158191591916019161191621916319164191651916619167191681916919170191711917219173191741917519176191771917819179191801918119182191831918419185191861918719188191891919019191191921919319194191951919619197191981919919200192011920219203192041920519206192071920819209192101921119212192131921419215192161921719218192191922019221192221922319224192251922619227192281922919230192311923219233192341923519236192371923819239192401924119242192431924419245192461924719248192491925019251192521925319254192551925619257192581925919260192611926219263192641926519266192671926819269192701927119272192731927419275192761927719278192791928019281192821928319284192851
928619287192881928919290192911929219293192941929519296192971929819299193001930119302193031930419305193061930719308193091931019311193121931319314193151931619317193181931919320193211932219323193241932519326193271932819329193301933119332193331933419335193361933719338193391934019341193421934319344193451934619347193481934919350193511935219353193541935519356193571935819359193601936119362193631936419365193661936719368193691937019371193721937319374193751937619377193781937919380193811938219383193841938519386193871938819389193901939119392193931939419395193961939719398193991940019401194021940319404194051940619407194081940919410194111941219413194141941519416194171941819419194201942119422194231942419425194261942719428194291943019431194321943319434194351943619437194381943919440194411944219443194441944519446194471944819449194501945119452194531945419455194561945719458194591946019461194621946319464194651946619467194681946919470194711947219473194741947519476194771947819479194801948119482194831948419485194861948719488194891949019491194921949319494194951949619497194981949919500195011950219503195041950519506195071950819509195101951119512195131951419515195161951719518195191952019521195221952319524195251952619527195281952919530195311953219533195341953519536195371953819539195401954119542195431954419545195461954719548195491955019551195521955319554195551955619557195581955919560195611956219563195641956519566195671956819569195701957119572195731957419575195761957719578195791958019581195821958319584195851958619587195881958919590195911959219593195941959519596195971959819599196001960119602196031960419605196061960719608196091961019611196121961319614196151961619617196181961919620196211962219623196241962519626196271962819629196301963119632196331963419635196361963719638196391964019641196421964319644196451964619647196481964919650196511965219653196541965519656196571965819659196601966119662196631966419665196661966719668196691967019671196721967319674196751967619677196781967919680196811968219683196841968519686196871968819689196901969119692196931969419695196961969719698196991970019701197021970319704197051970619707197081970919710197111971219713197141971519716197171971819719197201972119722197231972419725197261972719728197291973019731197321973319734197351973619737197381973919740197411974219743197441974519746197471974819749197501975119752197531975419755197561975719758197591976019761197621976319764197651976619767197681976919770197711977219773197741977519776197771977819779197801978119782197831978419785197861978719788197891979019791197921979319794197951979619797197981979919800198011980219803198041980519806198071980819809198101981119812198131981419815198161981719818198191982019821198221982319824198251982619827198281982919830198311983219833198341983519836198371983819839198401984119842198431984419845198461984719848198491985019851198521985319854198551985619857198581985919860198611986219863198641986519866198671986819869198701987119872198731987419875198761987719878198791988019881198821988319884198851988619887198881988919890198911989219893198941989519896198971989819899199001990119902199031990419905199061990719908199091991019911199121991319914199151991619917199181991919920199211992219923199241992519926199271992819929199301993119932199331993419935199361993719938199391994019941199421994319944199451994619947199481994919950199511995219953199541995519956199571995819959199601996119962199631996419965199661996719968199691997019971199721997319974199751997619977199781997919980199811998219983199841998519986199871998819989199901999119992199931999419995199961
999719998199992000020001200022000320004200052000620007200082000920010200112001220013200142001520016200172001820019200202002120022200232002420025200262002720028200292003020031200322003320034200352003620037200382003920040200412004220043200442004520046200472004820049200502005120052200532005420055200562005720058200592006020061200622006320064200652006620067200682006920070200712007220073200742007520076200772007820079200802008120082200832008420085200862008720088200892009020091200922009320094200952009620097200982009920100201012010220103201042010520106201072010820109201102011120112201132011420115201162011720118201192012020121201222012320124201252012620127201282012920130201312013220133201342013520136201372013820139201402014120142201432014420145201462014720148201492015020151201522015320154201552015620157201582015920160201612016220163201642016520166201672016820169201702017120172201732017420175201762017720178201792018020181201822018320184201852018620187201882018920190201912019220193201942019520196201972019820199202002020120202202032020420205202062020720208202092021020211202122021320214202152021620217202182021920220202212022220223202242022520226202272022820229202302023120232202332023420235202362023720238202392024020241202422024320244202452024620247202482024920250202512025220253202542025520256202572025820259202602026120262202632026420265202662026720268202692027020271202722027320274202752027620277202782027920280202812028220283202842028520286202872028820289202902029120292202932029420295202962029720298202992030020301203022030320304203052030620307203082030920310203112031220313203142031520316203172031820319203202032120322203232032420325203262032720328203292033020331203322033320334203352033620337203382033920340203412034220343203442034520346203472034820349203502035120352203532035420355203562035720358203592036020361203622036320364203652036620367203682036920370203712037220373203742037520376203772037820379203802038120382203832038420385203862038720388203892039020391203922039320394203952039620397203982039920400204012040220403204042040520406204072040820409204102041120412204132041420415204162041720418204192042020421204222042320424204252042620427204282042920430204312043220433204342043520436204372043820439204402044120442204432044420445204462044720448204492045020451204522045320454204552045620457204582045920460204612046220463204642046520466204672046820469204702047120472204732047420475204762047720478204792048020481204822048320484204852048620487204882048920490204912049220493204942049520496204972049820499205002050120502205032050420505205062050720508205092051020511205122051320514205152051620517205182051920520205212052220523205242052520526205272052820529205302053120532205332053420535205362053720538205392054020541205422054320544205452054620547205482054920550205512055220553205542055520556205572055820559205602056120562205632056420565205662056720568205692057020571205722057320574205752057620577205782057920580205812058220583205842058520586205872058820589205902059120592205932059420595205962059720598205992060020601206022060320604206052060620607206082060920610206112061220613206142061520616206172061820619206202062120622206232062420625206262062720628206292063020631206322063320634206352063620637206382063920640206412064220643206442064520646206472064820649206502065120652206532065420655206562065720658206592066020661206622066320664206652066620667206682066920670206712067220673206742067520676206772067820679206802068120682206832068420685206862068720688206892069020691206922069320694206952069620697206982069920700207012070220703207042070520706207072
070820709207102071120712207132071420715207162071720718207192072020721207222072320724207252072620727207282072920730207312073220733207342073520736207372073820739207402074120742207432074420745207462074720748207492075020751207522075320754207552075620757207582075920760207612076220763207642076520766207672076820769207702077120772207732077420775207762077720778207792078020781207822078320784207852078620787207882078920790207912079220793207942079520796207972079820799208002080120802208032080420805208062080720808208092081020811208122081320814208152081620817208182081920820208212082220823208242082520826208272082820829208302083120832208332083420835208362083720838208392084020841208422084320844208452084620847208482084920850208512085220853208542085520856208572085820859208602086120862208632086420865208662086720868208692087020871208722087320874208752087620877208782087920880208812088220883208842088520886208872088820889208902089120892208932089420895208962089720898208992090020901209022090320904209052090620907209082090920910209112091220913209142091520916209172091820919209202092120922209232092420925209262092720928209292093020931209322093320934209352093620937209382093920940209412094220943209442094520946209472094820949209502095120952209532095420955209562095720958209592096020961209622096320964209652096620967209682096920970209712097220973209742097520976209772097820979209802098120982209832098420985209862098720988209892099020991209922099320994209952099620997209982099921000210012100221003210042100521006210072100821009210102101121012210132101421015210162101721018210192102021021210222102321024210252102621027210282102921030210312103221033210342103521036210372103821039210402104121042210432104421045210462104721048210492105021051210522105321054210552105621057210582105921060210612106221063210642106521066210672106821069210702107121072210732107421075210762107721078210792108021081210822108321084210852108621087210882108921090210912109221093210942109521096210972109821099211002110121102211032110421105211062110721108211092111021111211122111321114211152111621117211182111921120211212112221123211242112521126211272112821129211302113121132211332113421135211362113721138211392114021141211422114321144211452114621147211482114921150211512115221153211542115521156211572115821159211602116121162211632116421165211662116721168211692117021171211722117321174211752117621177211782117921180211812118221183211842118521186211872118821189211902119121192211932119421195211962119721198211992120021201212022120321204212052120621207212082120921210212112121221213212142121521216212172121821219212202122121222212232122421225212262122721228212292123021231212322123321234212352123621237212382123921240212412124221243212442124521246212472124821249212502125121252212532125421255212562125721258212592126021261212622126321264212652126621267212682126921270212712127221273212742127521276212772127821279212802128121282212832128421285212862128721288212892129021291212922129321294212952129621297212982129921300213012130221303213042130521306213072130821309213102131121312213132131421315213162131721318213192132021321213222132321324213252132621327213282132921330213312133221333213342133521336213372133821339213402134121342213432134421345213462134721348213492135021351213522135321354213552135621357213582135921360213612136221363213642136521366213672136821369213702137121372213732137421375213762137721378213792138021381213822138321384213852138621387213882138921390213912139221393213942139521396213972139821399214002140121402214032140421405214062140721408214092141021411214122141321414214152141621417214182
141921420214212142221423214242142521426214272142821429214302143121432214332143421435214362143721438214392144021441214422144321444214452144621447214482144921450214512145221453214542145521456214572145821459214602146121462214632146421465214662146721468214692147021471214722147321474214752147621477214782147921480214812148221483214842148521486214872148821489214902149121492214932149421495214962149721498214992150021501215022150321504215052150621507215082150921510215112151221513215142151521516215172151821519215202152121522215232152421525215262152721528215292153021531215322153321534215352153621537215382153921540215412154221543215442154521546215472154821549215502155121552215532155421555215562155721558215592156021561215622156321564215652156621567215682156921570215712157221573215742157521576215772157821579215802158121582215832158421585215862158721588215892159021591215922159321594215952159621597215982159921600216012160221603216042160521606216072160821609216102161121612216132161421615216162161721618216192162021621216222162321624216252162621627216282162921630216312163221633216342163521636216372163821639216402164121642216432164421645216462164721648216492165021651216522165321654216552165621657216582165921660216612166221663216642166521666216672166821669216702167121672216732167421675216762167721678216792168021681216822168321684216852168621687216882168921690216912169221693216942169521696216972169821699217002170121702217032170421705217062170721708217092171021711217122171321714217152171621717217182171921720217212172221723217242172521726217272172821729217302173121732217332173421735217362173721738217392174021741217422174321744217452174621747217482174921750217512175221753217542175521756217572175821759217602176121762217632176421765217662176721768217692177021771217722177321774217752177621777217782177921780217812178221783217842178521786217872178821789217902179121792217932179421795217962179721798217992180021801218022180321804218052180621807218082180921810218112181221813218142181521816218172181821819218202182121822218232182421825218262182721828218292183021831218322183321834218352183621837218382183921840218412184221843218442184521846218472184821849218502185121852218532185421855218562185721858218592186021861218622186321864218652186621867218682186921870218712187221873218742187521876218772187821879218802188121882218832188421885218862188721888218892189021891218922189321894218952189621897218982189921900219012190221903219042190521906219072190821909219102191121912219132191421915219162191721918219192192021921219222192321924219252192621927219282192921930219312193221933219342193521936219372193821939219402194121942219432194421945219462194721948219492195021951219522195321954219552195621957219582195921960219612196221963219642196521966219672196821969219702197121972219732197421975219762197721978219792198021981219822198321984219852198621987219882198921990219912199221993219942199521996219972199821999220002200122002220032200422005220062200722008220092201022011220122201322014220152201622017220182201922020220212202222023220242202522026220272202822029220302203122032220332203422035220362203722038220392204022041220422204322044220452204622047220482204922050220512205222053220542205522056220572205822059220602206122062220632206422065220662206722068220692207022071220722207322074220752207622077220782207922080220812208222083220842208522086220872208822089220902209122092220932209422095220962209722098220992210022101221022210322104221052210622107221082210922110221112211222113221142211522116221172211822119221202212122122221232212422125221262212722128221292
213022131221322213322134221352213622137221382213922140221412214222143221442214522146221472214822149221502215122152221532215422155221562215722158221592216022161221622216322164221652216622167221682216922170221712217222173221742217522176221772217822179221802218122182221832218422185221862218722188221892219022191221922219322194221952219622197221982219922200222012220222203222042220522206222072220822209222102221122212222132221422215222162221722218222192222022221222222222322224222252222622227222282222922230222312223222233222342223522236222372223822239222402224122242222432224422245222462224722248222492225022251222522225322254222552225622257222582225922260222612226222263222642226522266222672226822269222702227122272222732227422275222762227722278222792228022281222822228322284222852228622287222882228922290222912229222293222942229522296222972229822299223002230122302223032230422305223062230722308223092231022311223122231322314223152231622317223182231922320223212232222323223242232522326223272232822329223302233122332223332233422335223362233722338223392234022341223422234322344223452234622347223482234922350223512235222353223542235522356223572235822359223602236122362223632236422365223662236722368223692237022371223722237322374223752237622377223782237922380223812238222383223842238522386223872238822389223902239122392223932239422395223962239722398223992240022401224022240322404224052240622407224082240922410224112241222413224142241522416224172241822419224202242122422224232242422425224262242722428224292243022431224322243322434224352243622437224382243922440224412244222443224442244522446224472244822449224502245122452224532245422455224562245722458224592246022461224622246322464224652246622467224682246922470224712247222473224742247522476224772247822479224802248122482224832248422485224862248722488224892249022491224922249322494224952249622497224982249922500225012250222503225042250522506225072250822509225102251122512225132251422515225162251722518225192252022521225222252322524225252252622527225282252922530225312253222533225342253522536225372253822539225402254122542225432254422545225462254722548225492255022551225522255322554225552255622557225582255922560225612256222563225642256522566225672256822569225702257122572225732257422575225762257722578225792258022581225822258322584225852258622587225882258922590225912259222593225942259522596225972259822599226002260122602226032260422605226062260722608226092261022611226122261322614226152261622617226182261922620226212262222623226242262522626226272262822629226302263122632226332263422635226362263722638226392264022641226422264322644226452264622647226482264922650226512265222653226542265522656226572265822659226602266122662226632266422665226662266722668226692267022671226722267322674226752267622677226782267922680226812268222683226842268522686226872268822689226902269122692226932269422695226962269722698226992270022701227022270322704227052270622707227082270922710227112271222713227142271522716227172271822719227202272122722227232272422725227262272722728227292273022731227322273322734227352273622737227382273922740227412274222743227442274522746227472274822749227502275122752227532275422755227562275722758227592276022761227622276322764227652276622767227682276922770227712277222773227742277522776227772277822779227802278122782227832278422785227862278722788227892279022791227922279322794227952279622797227982279922800228012280222803228042280522806228072280822809228102281122812228132281422815228162281722818228192282022821228222282322824228252282622827228282282922830228312283222833228342283522836228372283822839228402
284122842228432284422845228462284722848228492285022851228522285322854228552285622857228582285922860228612286222863228642286522866228672286822869228702287122872228732287422875228762287722878228792288022881228822288322884228852288622887228882288922890228912289222893228942289522896228972289822899229002290122902229032290422905229062290722908229092291022911229122291322914229152291622917229182291922920229212292222923229242292522926229272292822929229302293122932229332293422935229362293722938229392294022941229422294322944229452294622947229482294922950229512295222953229542295522956229572295822959229602296122962229632296422965229662296722968229692297022971229722297322974229752297622977229782297922980229812298222983229842298522986229872298822989229902299122992229932299422995229962299722998229992300023001230022300323004230052300623007230082300923010230112301223013230142301523016230172301823019230202302123022230232302423025230262302723028230292303023031230322303323034230352303623037230382303923040230412304223043230442304523046230472304823049230502305123052230532305423055230562305723058230592306023061230622306323064230652306623067230682306923070230712307223073230742307523076230772307823079230802308123082230832308423085230862308723088230892309023091230922309323094230952309623097230982309923100231012310223103231042310523106231072310823109231102311123112231132311423115231162311723118231192312023121231222312323124231252312623127231282312923130231312313223133231342313523136231372313823139231402314123142231432314423145231462314723148231492315023151231522315323154231552315623157231582315923160231612316223163231642316523166231672316823169231702317123172231732317423175231762317723178231792318023181231822318323184231852318623187231882318923190231912319223193231942319523196231972319823199232002320123202232032320423205232062320723208232092321023211232122321323214232152321623217232182321923220232212322223223232242322523226232272322823229232302323123232232332323423235232362323723238232392324023241232422324323244232452324623247232482324923250232512325223253232542325523256232572325823259232602326123262232632326423265232662326723268232692327023271232722327323274232752327623277232782327923280232812328223283232842328523286232872328823289232902329123292232932329423295232962329723298232992330023301233022330323304233052330623307233082330923310233112331223313233142331523316233172331823319233202332123322233232332423325233262332723328233292333023331233322333323334233352333623337233382333923340233412334223343233442334523346233472334823349233502335123352233532335423355233562335723358233592336023361233622336323364233652336623367233682336923370233712337223373233742337523376233772337823379233802338123382233832338423385233862338723388233892339023391233922339323394233952339623397233982339923400234012340223403234042340523406234072340823409234102341123412234132341423415234162341723418234192342023421234222342323424234252342623427234282342923430234312343223433234342343523436234372343823439234402344123442234432344423445234462344723448234492345023451234522345323454234552345623457234582345923460234612346223463234642346523466234672346823469234702347123472234732347423475234762347723478234792348023481234822348323484234852348623487234882348923490234912349223493234942349523496234972349823499235002350123502235032350423505235062350723508235092351023511235122351323514235152351623517235182351923520235212352223523235242352523526235272352823529235302353123532235332353423535235362353723538235392354023541235422354323544235452354623547235482354923550235512
355223553235542355523556235572355823559235602356123562235632356423565235662356723568235692357023571235722357323574235752357623577235782357923580235812358223583235842358523586235872358823589235902359123592235932359423595235962359723598235992360023601236022360323604236052360623607236082360923610236112361223613236142361523616236172361823619236202362123622236232362423625236262362723628236292363023631236322363323634236352363623637236382363923640236412364223643236442364523646236472364823649236502365123652236532365423655236562365723658236592366023661236622366323664236652366623667236682366923670236712367223673236742367523676236772367823679236802368123682236832368423685236862368723688236892369023691236922369323694236952369623697236982369923700237012370223703237042370523706237072370823709237102371123712237132371423715237162371723718237192372023721237222372323724237252372623727237282372923730237312373223733237342373523736237372373823739237402374123742237432374423745237462374723748237492375023751237522375323754237552375623757237582375923760237612376223763237642376523766237672376823769237702377123772237732377423775237762377723778237792378023781237822378323784237852378623787237882378923790237912379223793237942379523796237972379823799238002380123802238032380423805238062380723808238092381023811238122381323814238152381623817238182381923820238212382223823238242382523826238272382823829238302383123832238332383423835238362383723838238392384023841238422384323844238452384623847238482384923850238512385223853238542385523856238572385823859238602386123862238632386423865238662386723868238692387023871238722387323874238752387623877238782387923880238812388223883238842388523886238872388823889238902389123892238932389423895238962389723898238992390023901239022390323904239052390623907239082390923910239112391223913239142391523916239172391823919239202392123922239232392423925239262392723928239292393023931239322393323934239352393623937239382393923940239412394223943239442394523946239472394823949239502395123952239532395423955239562395723958239592396023961239622396323964239652396623967239682396923970239712397223973239742397523976239772397823979239802398123982239832398423985239862398723988239892399023991239922399323994239952399623997239982399924000240012400224003240042400524006240072400824009240102401124012240132401424015240162401724018240192402024021240222402324024240252402624027240282402924030240312403224033240342403524036240372403824039240402404124042240432404424045240462404724048240492405024051240522405324054240552405624057240582405924060240612406224063240642406524066240672406824069240702407124072240732407424075240762407724078240792408024081240822408324084240852408624087240882408924090240912409224093240942409524096240972409824099241002410124102241032410424105241062410724108241092411024111241122411324114241152411624117241182411924120241212412224123241242412524126241272412824129241302413124132241332413424135241362413724138241392414024141241422414324144241452414624147241482414924150241512415224153241542415524156241572415824159241602416124162241632416424165241662416724168241692417024171241722417324174241752417624177241782417924180241812418224183241842418524186241872418824189241902419124192241932419424195241962419724198241992420024201242022420324204242052420624207242082420924210242112421224213242142421524216242172421824219242202422124222242232422424225242262422724228242292423024231242322423324234242352423624237242382423924240242412424224243242442424524246242472424824249242502425124252242532425424255242562425724258242592426024261242622
706137062370633706437065370663706737068370693707037071370723707337074370753707637077370783707937080370813708237083370843708537086370873708837089370903709137092370933709437095370963709737098370993710037101371023710337104371053710637107371083710937110371113711237113371143711537116371173711837119371203712137122371233712437125371263712737128371293713037131371323713337134371353713637137371383713937140371413714237143371443714537146371473714837149371503715137152371533715437155371563715737158371593716037161371623716337164371653716637167371683716937170371713717237173371743717537176371773717837179371803718137182371833718437185371863718737188371893719037191371923719337194371953719637197371983719937200372013720237203372043720537206372073720837209372103721137212372133721437215372163721737218372193722037221372223722337224372253722637227372283722937230372313723237233372343723537236372373723837239372403724137242372433724437245372463724737248372493725037251372523725337254372553725637257372583725937260372613726237263372643726537266372673726837269372703727137272372733727437275372763727737278372793728037281372823728337284372853728637287372883728937290372913729237293372943729537296372973729837299373003730137302373033730437305373063730737308373093731037311373123731337314373153731637317373183731937320373213732237323373243732537326373273732837329373303733137332373333733437335373363733737338373393734037341373423734337344373453734637347373483734937350373513735237353373543735537356373573735837359373603736137362373633736437365373663736737368373693737037371373723737337374373753737637377373783737937380373813738237383373843738537386373873738837389373903739137392373933739437395373963739737398373993740037401374023740337404374053740637407374083740937410374113741237413374143741537416374173741837419374203742137422374233742437425374263742737428374293743037431374323743337434374353743637437374383743937440374413744237443374443744537446374473744837449374503745137452374533745437455374563745737458374593746037461374623746337464374653746637467374683746937470374713747237473374743747537476374773747837479374803748137482374833748437485374863748737488374893749037491374923749337494374953749637497374983749937500375013750237503375043750537506375073750837509375103751137512375133751437515375163751737518375193752037521375223752337524375253752637527375283752937530375313753237533375343753537536375373753837539375403754137542375433754437545375463754737548375493755037551375523755337554375553755637557375583755937560375613756237563375643756537566375673756837569375703757137572375733757437575375763757737578375793758037581375823758337584375853758637587375883758937590375913759237593375943759537596375973759837599376003760137602376033760437605376063760737608376093761037611376123761337614376153761637617376183761937620376213762237623376243762537626376273762837629376303763137632376333763437635376363763737638376393764037641376423764337644376453764637647376483764937650376513765237653376543765537656376573765837659376603766137662376633766437665376663766737668376693767037671376723767337674376753767637677376783767937680376813768237683376843768537686376873768837689376903769137692376933769437695376963769737698376993770037701377023770337704377053770637707377083770937710377113771237713377143771537716377173771837719377203772137722377233772437725377263772737728377293773037731377323773337734377353773637737377383773937740377413774237743377443774537746377473774837749377503775137752377533775437755377563775737758377593776037761377623776337764377653776637767377683776937770377713
777237773377743777537776377773777837779377803778137782377833778437785377863778737788377893779037791377923779337794377953779637797377983779937800378013780237803378043780537806378073780837809378103781137812378133781437815378163781737818378193782037821378223782337824378253782637827378283782937830378313783237833378343783537836378373783837839378403784137842378433784437845378463784737848378493785037851378523785337854378553785637857378583785937860378613786237863378643786537866378673786837869378703787137872378733787437875378763787737878378793788037881378823788337884378853788637887378883788937890378913789237893378943789537896378973789837899379003790137902379033790437905379063790737908379093791037911379123791337914379153791637917379183791937920379213792237923379243792537926379273792837929379303793137932379333793437935379363793737938379393794037941379423794337944379453794637947379483794937950379513795237953379543795537956379573795837959379603796137962379633796437965379663796737968379693797037971379723797337974379753797637977379783797937980379813798237983379843798537986379873798837989379903799137992379933799437995379963799737998379993800038001380023800338004380053800638007380083800938010380113801238013380143801538016380173801838019380203802138022380233802438025380263802738028380293803038031380323803338034380353803638037380383803938040380413804238043380443804538046380473804838049380503805138052380533805438055380563805738058380593806038061380623806338064380653806638067380683806938070380713807238073380743807538076380773807838079380803808138082380833808438085380863808738088380893809038091380923809338094380953809638097380983809938100381013810238103381043810538106381073810838109381103811138112381133811438115381163811738118381193812038121381223812338124381253812638127381283812938130381313813238133381343813538136381373813838139381403814138142381433814438145381463814738148381493815038151381523815338154381553815638157381583815938160381613816238163381643816538166381673816838169381703817138172381733817438175381763817738178381793818038181381823818338184381853818638187381883818938190381913819238193381943819538196381973819838199382003820138202382033820438205382063820738208382093821038211382123821338214382153821638217382183821938220382213822238223382243822538226382273822838229382303823138232382333823438235382363823738238382393824038241382423824338244382453824638247382483824938250382513825238253382543825538256382573825838259382603826138262382633826438265382663826738268382693827038271382723827338274382753827638277382783827938280382813828238283382843828538286382873828838289382903829138292382933829438295382963829738298382993830038301383023830338304383053830638307383083830938310383113831238313383143831538316383173831838319383203832138322383233832438325383263832738328383293833038331383323833338334383353833638337383383833938340383413834238343383443834538346383473834838349383503835138352383533835438355383563835738358383593836038361383623836338364383653836638367383683836938370383713837238373383743837538376383773837838379383803838138382383833838438385383863838738388383893839038391383923839338394383953839638397383983839938400384013840238403384043840538406384073840838409384103841138412384133841438415384163841738418384193842038421384223842338424384253842638427384283842938430384313843238433384343843538436384373843838439384403844138442384433844438445384463844738448384493845038451384523845338454384553845638457384583845938460384613846238463384643846538466384673846838469384703847138472384733847438475384763847738478384793848038481384823
848338484384853848638487384883848938490384913849238493384943849538496384973849838499385003850138502385033850438505385063850738508385093851038511385123851338514385153851638517385183851938520385213852238523385243852538526385273852838529385303853138532385333853438535385363853738538385393854038541385423854338544385453854638547385483854938550385513855238553385543855538556385573855838559385603856138562385633856438565385663856738568385693857038571385723857338574385753857638577385783857938580385813858238583385843858538586385873858838589385903859138592385933859438595385963859738598385993860038601386023860338604386053860638607386083860938610386113861238613386143861538616386173861838619386203862138622386233862438625386263862738628386293863038631386323863338634386353863638637386383863938640386413864238643386443864538646386473864838649386503865138652386533865438655386563865738658386593866038661386623866338664386653866638667386683866938670386713867238673386743867538676386773867838679386803868138682386833868438685386863868738688386893869038691386923869338694386953869638697386983869938700387013870238703387043870538706387073870838709387103871138712387133871438715387163871738718387193872038721387223872338724387253872638727387283872938730387313873238733387343873538736387373873838739387403874138742387433874438745387463874738748387493875038751387523875338754387553875638757387583875938760387613876238763387643876538766387673876838769387703877138772387733877438775387763877738778387793878038781387823878338784387853878638787387883878938790387913879238793387943879538796387973879838799388003880138802388033880438805388063880738808388093881038811388123881338814388153881638817388183881938820388213882238823388243882538826388273882838829388303883138832388333883438835388363883738838388393884038841388423884338844388453884638847388483884938850388513885238853388543885538856388573885838859388603886138862388633886438865388663886738868388693887038871388723887338874388753887638877388783887938880388813888238883388843888538886388873888838889388903889138892388933889438895388963889738898388993890038901389023890338904389053890638907389083890938910389113891238913389143891538916389173891838919389203892138922389233892438925389263892738928389293893038931389323893338934389353893638937389383893938940389413894238943389443894538946389473894838949389503895138952389533895438955389563895738958389593896038961389623896338964389653896638967389683896938970389713897238973389743897538976389773897838979389803898138982389833898438985389863898738988389893899038991389923899338994389953899638997389983899939000390013900239003390043900539006390073900839009390103901139012390133901439015390163901739018390193902039021390223902339024390253902639027390283902939030390313903239033390343903539036390373903839039390403904139042390433904439045390463904739048390493905039051390523905339054390553905639057390583905939060390613906239063390643906539066390673906839069390703907139072390733907439075390763907739078390793908039081390823908339084390853908639087390883908939090390913909239093390943909539096390973909839099391003910139102391033910439105391063910739108391093911039111391123911339114391153911639117391183911939120391213912239123391243912539126391273912839129391303913139132391333913439135391363913739138391393914039141391423914339144391453914639147391483914939150391513915239153391543915539156391573915839159391603916139162391633916439165391663916739168391693917039171391723917339174391753917639177391783917939180391813918239183391843918539186391873918839189391903919139192391933
919439195391963919739198391993920039201392023920339204392053920639207392083920939210392113921239213392143921539216392173921839219392203922139222392233922439225392263922739228392293923039231392323923339234392353923639237392383923939240392413924239243392443924539246392473924839249392503925139252392533925439255392563925739258392593926039261392623926339264392653926639267392683926939270392713927239273392743927539276392773927839279392803928139282392833928439285392863928739288392893929039291392923929339294392953929639297392983929939300393013930239303393043930539306393073930839309393103931139312393133931439315393163931739318393193932039321393223932339324393253932639327393283932939330393313933239333393343933539336393373933839339393403934139342393433934439345393463934739348393493935039351393523935339354393553935639357393583935939360393613936239363393643936539366393673936839369393703937139372393733937439375393763937739378393793938039381393823938339384393853938639387393883938939390393913939239393393943939539396393973939839399394003940139402394033940439405394063940739408394093941039411394123941339414394153941639417394183941939420394213942239423394243942539426394273942839429394303943139432394333943439435394363943739438394393944039441394423944339444394453944639447394483944939450394513945239453394543945539456394573945839459394603946139462394633946439465394663946739468394693947039471394723947339474394753947639477394783947939480394813948239483394843948539486394873948839489394903949139492394933949439495394963949739498394993950039501395023950339504395053950639507395083950939510395113951239513395143951539516395173951839519395203952139522395233952439525395263952739528395293953039531395323953339534395353953639537395383953939540395413954239543395443954539546395473954839549395503955139552395533955439555395563955739558395593956039561395623956339564395653956639567395683956939570395713957239573395743957539576395773957839579395803958139582395833958439585395863958739588395893959039591395923959339594395953959639597395983959939600396013960239603396043960539606396073960839609396103961139612396133961439615396163961739618396193962039621396223962339624396253962639627396283962939630396313963239633396343963539636396373963839639396403964139642396433964439645396463964739648396493965039651396523965339654396553965639657396583965939660396613966239663396643966539666396673966839669396703967139672396733967439675396763967739678396793968039681396823968339684396853968639687396883968939690396913969239693396943969539696396973969839699397003970139702397033970439705397063970739708397093971039711397123971339714397153971639717397183971939720397213972239723397243972539726397273972839729397303973139732397333973439735397363973739738397393974039741397423974339744397453974639747397483974939750397513975239753397543975539756397573975839759397603976139762397633976439765397663976739768397693977039771397723977339774397753977639777397783977939780397813978239783397843978539786397873978839789397903979139792397933979439795397963979739798397993980039801398023980339804398053980639807398083980939810398113981239813398143981539816398173981839819398203982139822398233982439825398263982739828398293983039831398323983339834398353983639837398383983939840398413984239843398443984539846398473984839849398503985139852398533985439855398563985739858398593986039861398623986339864398653986639867398683986939870398713987239873398743987539876398773987839879398803988139882398833988439885398863988739888398893989039891398923989339894398953989639897398983989939900399013990239903399043
990539906399073990839909399103991139912399133991439915399163991739918399193992039921399223992339924399253992639927399283992939930399313993239933399343993539936399373993839939399403994139942399433994439945399463994739948399493995039951399523995339954399553995639957399583995939960399613996239963399643996539966399673996839969399703997139972399733997439975399763997739978399793998039981399823998339984399853998639987399883998939990399913999239993399943999539996399973999839999400004000140002400034000440005400064000740008400094001040011400124001340014400154001640017400184001940020400214002240023400244002540026400274002840029400304003140032400334003440035400364003740038400394004040041400424004340044400454004640047400484004940050400514005240053400544005540056400574005840059400604006140062400634006440065400664006740068400694007040071400724007340074400754007640077400784007940080400814008240083400844008540086400874008840089400904009140092400934009440095400964009740098400994010040101401024010340104401054010640107401084010940110401114011240113401144011540116401174011840119401204012140122401234012440125401264012740128401294013040131401324013340134401354013640137401384013940140401414014240143401444014540146401474014840149401504015140152401534015440155401564015740158401594016040161401624016340164401654016640167401684016940170401714017240173401744017540176401774017840179401804018140182401834018440185401864018740188401894019040191401924019340194401954019640197401984019940200402014020240203402044020540206402074020840209402104021140212402134021440215402164021740218402194022040221402224022340224402254022640227402284022940230402314023240233402344023540236402374023840239402404024140242402434024440245402464024740248402494025040251402524025340254402554025640257402584025940260402614026240263402644026540266402674026840269402704027140272402734027440275402764027740278402794028040281402824028340284402854028640287402884028940290402914029240293402944029540296402974029840299403004030140302403034030440305403064030740308403094031040311403124031340314403154031640317403184031940320403214032240323403244032540326403274032840329403304033140332403334033440335403364033740338403394034040341403424034340344403454034640347403484034940350403514035240353403544035540356403574035840359403604036140362403634036440365403664036740368403694037040371403724037340374403754037640377403784037940380403814038240383403844038540386403874038840389403904039140392403934039440395403964039740398403994040040401404024040340404404054040640407404084040940410404114041240413404144041540416404174041840419404204042140422404234042440425404264042740428404294043040431404324043340434404354043640437404384043940440404414044240443404444044540446404474044840449404504045140452404534045440455404564045740458404594046040461404624046340464404654046640467404684046940470404714047240473404744047540476404774047840479404804048140482404834048440485404864048740488404894049040491404924049340494404954049640497404984049940500405014050240503405044050540506405074050840509405104051140512405134051440515405164051740518405194052040521405224052340524405254052640527405284052940530405314053240533405344053540536405374053840539405404054140542405434054440545405464054740548405494055040551405524055340554405554055640557405584055940560405614056240563405644056540566405674056840569405704057140572405734057440575405764057740578405794058040581405824058340584405854058640587405884058940590405914059240593405944059540596405974059840599406004060140602406034060440605406064060740608406094061040611406124061340614406154
061640617406184061940620406214062240623406244062540626406274062840629406304063140632406334063440635406364063740638406394064040641406424064340644406454064640647406484064940650406514065240653406544065540656406574065840659406604066140662406634066440665406664066740668406694067040671406724067340674406754067640677406784067940680406814068240683406844068540686406874068840689406904069140692406934069440695406964069740698406994070040701407024070340704407054070640707407084070940710407114071240713407144071540716407174071840719407204072140722407234072440725407264072740728407294073040731407324073340734407354073640737407384073940740407414074240743407444074540746407474074840749407504075140752407534075440755407564075740758407594076040761407624076340764407654076640767407684076940770407714077240773407744077540776407774077840779407804078140782407834078440785407864078740788407894079040791407924079340794407954079640797407984079940800408014080240803408044080540806408074080840809408104081140812408134081440815408164081740818408194082040821408224082340824408254082640827408284082940830408314083240833408344083540836408374083840839408404084140842408434084440845408464084740848408494085040851408524085340854408554085640857408584085940860408614086240863408644086540866408674086840869408704087140872408734087440875408764087740878408794088040881408824088340884408854088640887408884088940890408914089240893408944089540896408974089840899409004090140902409034090440905409064090740908409094091040911409124091340914409154091640917409184091940920409214092240923409244092540926409274092840929409304093140932409334093440935409364093740938409394094040941409424094340944409454094640947409484094940950409514095240953409544095540956409574095840959409604096140962409634096440965409664096740968409694097040971409724097340974409754097640977409784097940980409814098240983409844098540986409874098840989409904099140992409934099440995409964099740998409994100041001410024100341004410054100641007410084100941010410114101241013410144101541016410174101841019410204102141022410234102441025410264102741028410294103041031410324103341034410354103641037410384103941040410414104241043410444104541046410474104841049410504105141052410534105441055410564105741058410594106041061410624106341064410654106641067410684106941070410714107241073410744107541076410774107841079410804108141082410834108441085410864108741088410894109041091410924109341094410954109641097410984109941100411014110241103411044110541106411074110841109411104111141112411134111441115411164111741118411194112041121411224112341124411254112641127411284112941130411314113241133411344113541136411374113841139411404114141142411434114441145411464114741148411494115041151411524115341154411554115641157411584115941160411614116241163411644116541166411674116841169411704117141172411734117441175411764117741178411794118041181411824118341184411854118641187411884118941190411914119241193411944119541196411974119841199412004120141202412034120441205412064120741208412094121041211412124121341214412154121641217412184121941220412214122241223412244122541226412274122841229412304123141232412334123441235412364123741238412394124041241412424124341244412454124641247412484124941250412514125241253412544125541256412574125841259412604126141262412634126441265412664126741268412694127041271412724127341274412754127641277412784127941280412814128241283412844128541286412874128841289412904129141292412934129441295412964129741298412994130041301413024130341304413054130641307413084130941310413114131241313413144131541316413174131841319413204132141322413234132441325413264
132741328413294133041331413324133341334413354133641337413384133941340413414134241343413444134541346413474134841349413504135141352413534135441355413564135741358413594136041361413624136341364413654136641367413684136941370413714137241373413744137541376413774137841379413804138141382413834138441385413864138741388413894139041391413924139341394413954139641397413984139941400414014140241403414044140541406414074140841409414104141141412414134141441415414164141741418414194142041421414224142341424414254142641427414284142941430414314143241433414344143541436414374143841439414404144141442414434144441445414464144741448414494145041451414524145341454414554145641457414584145941460414614146241463414644146541466414674146841469414704147141472414734147441475414764147741478414794148041481414824148341484414854148641487414884148941490414914149241493414944149541496414974149841499415004150141502415034150441505415064150741508415094151041511415124151341514415154151641517415184151941520415214152241523415244152541526415274152841529415304153141532415334153441535415364153741538415394154041541415424154341544415454154641547415484154941550415514155241553415544155541556415574155841559415604156141562415634156441565415664156741568415694157041571415724157341574415754157641577415784157941580415814158241583415844158541586415874158841589415904159141592415934159441595415964159741598415994160041601416024160341604416054160641607416084160941610416114161241613416144161541616416174161841619416204162141622416234162441625416264162741628416294163041631416324163341634416354163641637416384163941640416414164241643416444164541646416474164841649416504165141652416534165441655416564165741658416594166041661416624166341664416654166641667416684166941670416714167241673416744167541676416774167841679416804168141682416834168441685416864168741688416894169041691416924169341694416954169641697416984169941700417014170241703417044170541706417074170841709417104171141712417134171441715417164171741718417194172041721417224172341724417254172641727417284172941730417314173241733417344173541736417374173841739417404174141742417434174441745417464174741748417494175041751417524175341754417554175641757417584175941760417614176241763417644176541766417674176841769417704177141772417734177441775417764177741778417794178041781417824178341784417854178641787417884178941790417914179241793417944179541796417974179841799418004180141802418034180441805418064180741808418094181041811418124181341814418154181641817418184181941820418214182241823418244182541826418274182841829418304183141832418334183441835418364183741838418394184041841418424184341844418454184641847418484184941850418514185241853418544185541856418574185841859418604186141862418634186441865418664186741868418694187041871418724187341874418754187641877418784187941880418814188241883418844188541886418874188841889418904189141892418934189441895418964189741898418994190041901419024190341904419054190641907419084190941910419114191241913419144191541916419174191841919419204192141922419234192441925419264192741928419294193041931419324193341934419354193641937419384193941940419414194241943419444194541946419474194841949419504195141952419534195441955419564195741958419594196041961419624196341964419654196641967419684196941970419714197241973419744197541976419774197841979419804198141982419834198441985419864198741988419894199041991419924199341994419954199641997419984199942000420014200242003420044200542006420074200842009420104201142012420134201442015420164201742018420194202042021420224202342024420254202642027420284202942030420314203242033420344203542036420374
203842039420404204142042420434204442045420464204742048420494205042051420524205342054420554205642057420584205942060420614206242063420644206542066420674206842069420704207142072420734207442075420764207742078420794208042081420824208342084420854208642087420884208942090420914209242093420944209542096420974209842099421004210142102421034210442105421064210742108421094211042111421124211342114421154211642117421184211942120421214212242123421244212542126421274212842129421304213142132421334213442135421364213742138421394214042141421424214342144421454214642147421484214942150421514215242153421544215542156421574215842159421604216142162421634216442165421664216742168421694217042171421724217342174421754217642177421784217942180421814218242183421844218542186421874218842189421904219142192421934219442195421964219742198421994220042201422024220342204422054220642207422084220942210422114221242213422144221542216422174221842219422204222142222422234222442225422264222742228422294223042231422324223342234422354223642237422384223942240422414224242243422444224542246422474224842249422504225142252422534225442255422564225742258422594226042261422624226342264422654226642267422684226942270422714227242273422744227542276422774227842279422804228142282422834228442285422864228742288422894229042291422924229342294422954229642297422984229942300423014230242303423044230542306423074230842309423104231142312423134231442315423164231742318423194232042321423224232342324423254232642327423284232942330423314233242333423344233542336423374233842339423404234142342423434234442345423464234742348423494235042351423524235342354423554235642357423584235942360423614236242363423644236542366423674236842369423704237142372423734237442375423764237742378423794238042381423824238342384423854238642387423884238942390423914239242393423944239542396423974239842399424004240142402424034240442405424064240742408424094241042411424124241342414424154241642417424184241942420424214242242423424244242542426424274242842429424304243142432424334243442435424364243742438424394244042441424424244342444424454244642447424484244942450424514245242453424544245542456424574245842459424604246142462424634246442465424664246742468424694247042471424724247342474424754247642477424784247942480424814248242483424844248542486424874248842489424904249142492424934249442495424964249742498424994250042501425024250342504425054250642507425084250942510425114251242513425144251542516425174251842519425204252142522425234252442525425264252742528425294253042531425324253342534425354253642537425384253942540425414254242543425444254542546425474254842549425504255142552425534255442555425564255742558425594256042561425624256342564425654256642567425684256942570425714257242573425744257542576425774257842579425804258142582425834258442585425864258742588425894259042591425924259342594425954259642597425984259942600426014260242603426044260542606426074260842609426104261142612426134261442615426164261742618426194262042621426224262342624426254262642627426284262942630426314263242633426344263542636426374263842639426404264142642426434264442645426464264742648426494265042651426524265342654426554265642657426584265942660426614266242663426644266542666426674266842669426704267142672426734267442675426764267742678426794268042681426824268342684426854268642687426884268942690426914269242693426944269542696426974269842699427004270142702427034270442705427064270742708427094271042711427124271342714427154271642717427184271942720427214272242723427244272542726427274272842729427304273142732427334273442735427364273742738427394274042741427424274342744427454274642747427484
274942750427514275242753427544275542756427574275842759427604276142762427634276442765427664276742768427694277042771427724277342774427754277642777427784277942780427814278242783427844278542786427874278842789427904279142792427934279442795427964279742798427994280042801428024280342804428054280642807428084280942810428114281242813428144281542816428174281842819428204282142822428234282442825428264282742828428294283042831428324283342834428354283642837428384283942840428414284242843428444284542846428474284842849428504285142852428534285442855428564285742858428594286042861428624286342864428654286642867428684286942870428714287242873428744287542876428774287842879428804288142882428834288442885428864288742888428894289042891428924289342894428954289642897428984289942900429014290242903429044290542906429074290842909429104291142912429134291442915429164291742918429194292042921429224292342924429254292642927429284292942930429314293242933429344293542936429374293842939429404294142942429434294442945429464294742948429494295042951429524295342954429554295642957429584295942960429614296242963429644296542966429674296842969429704297142972429734297442975429764297742978429794298042981429824298342984429854298642987429884298942990429914299242993429944299542996429974299842999430004300143002430034300443005430064300743008430094301043011430124301343014430154301643017430184301943020430214302243023430244302543026430274302843029430304303143032430334303443035430364303743038430394304043041430424304343044430454304643047430484304943050430514305243053430544305543056430574305843059430604306143062430634306443065430664306743068430694307043071430724307343074430754307643077430784307943080430814308243083430844308543086430874308843089430904309143092430934309443095430964309743098430994310043101431024310343104431054310643107431084310943110431114311243113431144311543116431174311843119431204312143122431234312443125431264312743128431294313043131431324313343134431354313643137431384313943140431414314243143431444314543146431474314843149431504315143152431534315443155431564315743158431594316043161431624316343164431654316643167431684316943170431714317243173431744317543176431774317843179431804318143182431834318443185431864318743188431894319043191431924319343194431954319643197431984319943200432014320243203432044320543206432074320843209432104321143212432134321443215432164321743218432194322043221432224322343224432254322643227432284322943230432314323243233432344323543236432374323843239432404324143242432434324443245432464324743248432494325043251432524325343254432554325643257432584325943260432614326243263432644326543266432674326843269432704327143272432734327443275432764327743278432794328043281432824328343284432854328643287432884328943290432914329243293432944329543296432974329843299433004330143302433034330443305433064330743308433094331043311433124331343314433154331643317433184331943320433214332243323433244332543326433274332843329433304333143332433334333443335433364333743338433394334043341433424334343344433454334643347433484334943350433514335243353433544335543356433574335843359433604336143362433634336443365433664336743368433694337043371433724337343374433754337643377433784337943380433814338243383433844338543386433874338843389433904339143392433934339443395433964339743398433994340043401434024340343404434054340643407434084340943410434114341243413434144341543416434174341843419434204342143422434234342443425434264342743428434294343043431434324343343434434354343643437434384343943440434414344243443434444344543446434474344843449434504345143452434534345443455434564345743458434594
346043461434624346343464434654346643467434684346943470434714347243473434744347543476434774347843479434804348143482434834348443485434864348743488434894349043491434924349343494434954349643497434984349943500435014350243503435044350543506435074350843509435104351143512435134351443515435164351743518435194352043521435224352343524435254352643527435284352943530435314353243533435344353543536435374353843539435404354143542435434354443545435464354743548435494355043551435524355343554435554355643557435584355943560435614356243563435644356543566435674356843569435704357143572435734357443575435764357743578435794358043581435824358343584435854358643587435884358943590435914359243593435944359543596435974359843599436004360143602436034360443605436064360743608436094361043611436124361343614436154361643617436184361943620436214362243623436244362543626436274362843629436304363143632436334363443635436364363743638436394364043641436424364343644436454364643647436484364943650436514365243653436544365543656436574365843659436604366143662436634366443665436664366743668436694367043671436724367343674436754367643677436784367943680436814368243683436844368543686436874368843689436904369143692436934369443695436964369743698436994370043701437024370343704437054370643707437084370943710437114371243713437144371543716437174371843719437204372143722437234372443725437264372743728437294373043731437324373343734437354373643737437384373943740437414374243743437444374543746437474374843749437504375143752437534375443755437564375743758437594376043761437624376343764437654376643767437684376943770437714377243773437744377543776437774377843779437804378143782437834378443785437864378743788437894379043791437924379343794437954379643797437984379943800438014380243803438044380543806438074380843809438104381143812438134381443815438164381743818438194382043821438224382343824438254382643827438284382943830438314383243833438344383543836438374383843839438404384143842438434384443845438464384743848438494385043851438524385343854438554385643857438584385943860438614386243863438644386543866438674386843869438704387143872438734387443875438764387743878438794388043881438824388343884438854388643887438884388943890438914389243893438944389543896438974389843899439004390143902439034390443905439064390743908439094391043911439124391343914439154391643917439184391943920439214392243923439244392543926439274392843929439304393143932439334393443935439364393743938439394394043941439424394343944439454394643947439484394943950439514395243953439544395543956439574395843959439604396143962439634396443965439664396743968439694397043971439724397343974439754397643977439784397943980439814398243983439844398543986439874398843989439904399143992439934399443995439964399743998439994400044001440024400344004440054400644007440084400944010440114401244013440144401544016440174401844019440204402144022440234402444025440264402744028440294403044031440324403344034440354403644037440384403944040440414404244043440444404544046440474404844049440504405144052440534405444055440564405744058440594406044061440624406344064440654406644067440684406944070440714407244073440744407544076440774407844079440804408144082440834408444085440864408744088440894409044091440924409344094440954409644097440984409944100441014410244103441044410544106441074410844109441104411144112441134411444115441164411744118441194412044121441224412344124441254412644127441284412944130441314413244133441344413544136441374413844139441404414144142441434414444145441464414744148441494415044151441524415344154441554415644157441584415944160441614416244163441644416544166441674416844169441704
417144172441734417444175441764417744178441794418044181441824418344184441854418644187441884418944190441914419244193441944419544196441974419844199442004420144202442034420444205442064420744208442094421044211442124421344214442154421644217442184421944220442214422244223442244422544226442274422844229442304423144232442334423444235442364423744238442394424044241442424424344244442454424644247442484424944250442514425244253442544425544256442574425844259442604426144262442634426444265442664426744268442694427044271442724427344274442754427644277442784427944280442814428244283442844428544286442874428844289442904429144292442934429444295442964429744298442994430044301443024430344304443054430644307443084430944310443114431244313443144431544316443174431844319443204432144322443234432444325443264432744328443294433044331443324433344334443354433644337443384433944340443414434244343443444434544346443474434844349443504435144352443534435444355443564435744358443594436044361443624436344364443654436644367443684436944370443714437244373443744437544376443774437844379443804438144382443834438444385443864438744388443894439044391443924439344394443954439644397443984439944400444014440244403444044440544406444074440844409444104441144412444134441444415444164441744418444194442044421444224442344424444254442644427444284442944430444314443244433444344443544436444374443844439444404444144442444434444444445444464444744448444494445044451444524445344454444554445644457444584445944460444614446244463444644446544466444674446844469444704447144472444734447444475444764447744478444794448044481444824448344484444854448644487444884448944490444914449244493444944449544496444974449844499445004450144502445034450444505445064450744508445094451044511445124451344514445154451644517445184451944520445214452244523445244452544526445274452844529445304453144532445334453444535445364453744538445394454044541445424454344544445454454644547445484454944550445514455244553445544455544556445574455844559445604456144562445634456444565445664456744568445694457044571445724457344574445754457644577445784457944580445814458244583445844458544586445874458844589445904459144592445934459444595445964459744598445994460044601446024460344604446054460644607446084460944610446114461244613446144461544616446174461844619446204462144622446234462444625446264462744628446294463044631446324463344634446354463644637446384463944640446414464244643446444464544646446474464844649446504465144652446534465444655446564465744658446594466044661446624466344664446654466644667446684466944670446714467244673446744467544676446774467844679446804468144682446834468444685446864468744688446894469044691446924469344694446954469644697446984469944700447014470244703447044470544706447074470844709447104471144712447134471444715447164471744718447194472044721447224472344724447254472644727447284472944730447314473244733447344473544736447374473844739447404474144742447434474444745447464474744748447494475044751447524475344754447554475644757447584475944760447614476244763447644476544766447674476844769447704477144772447734477444775447764477744778447794478044781447824478344784447854478644787447884478944790447914479244793447944479544796447974479844799448004480144802448034480444805448064480744808448094481044811448124481344814448154481644817448184481944820448214482244823448244482544826448274482844829448304483144832448334483444835448364483744838448394484044841448424484344844448454484644847448484484944850448514485244853448544485544856448574485844859448604486144862448634486444865448664486744868448694487044871448724487344874448754487644877448784487944880448814
488244883448844488544886448874488844889448904489144892448934489444895448964489744898448994490044901449024490344904449054490644907449084490944910449114491244913449144491544916449174491844919449204492144922449234492444925449264492744928449294493044931449324493344934449354493644937449384493944940449414494244943449444494544946449474494844949449504495144952449534495444955449564495744958449594496044961449624496344964449654496644967449684496944970449714497244973449744497544976449774497844979449804498144982449834498444985449864498744988449894499044991449924499344994449954499644997449984499945000450014500245003450044500545006450074500845009450104501145012450134501445015450164501745018450194502045021450224502345024450254502645027450284502945030450314503245033450344503545036450374503845039450404504145042450434504445045450464504745048450494505045051450524505345054450554505645057450584505945060450614506245063450644506545066450674506845069450704507145072450734507445075450764507745078450794508045081450824508345084450854508645087450884508945090450914509245093450944509545096450974509845099451004510145102451034510445105451064510745108451094511045111451124511345114451154511645117451184511945120451214512245123451244512545126451274512845129451304513145132451334513445135451364513745138451394514045141451424514345144451454514645147451484514945150451514515245153451544515545156451574515845159451604516145162451634516445165451664516745168451694517045171451724517345174451754517645177451784517945180451814518245183451844518545186451874518845189451904519145192451934519445195451964519745198451994520045201452024520345204452054520645207452084520945210452114521245213452144521545216452174521845219452204522145222452234522445225452264522745228452294523045231452324523345234452354523645237452384523945240452414524245243452444524545246452474524845249452504525145252452534525445255452564525745258452594526045261452624526345264452654526645267452684526945270452714527245273452744527545276452774527845279452804528145282452834528445285452864528745288452894529045291452924529345294452954529645297452984529945300453014530245303453044530545306453074530845309453104531145312453134531445315453164531745318453194532045321453224532345324453254532645327453284532945330453314533245333453344533545336453374533845339453404534145342453434534445345453464534745348453494535045351453524535345354453554535645357453584535945360453614536245363453644536545366453674536845369453704537145372453734537445375453764537745378453794538045381453824538345384453854538645387453884538945390453914539245393453944539545396453974539845399454004540145402454034540445405454064540745408454094541045411454124541345414454154541645417454184541945420454214542245423454244542545426454274542845429454304543145432454334543445435454364543745438454394544045441454424544345444454454544645447454484544945450454514545245453454544545545456454574545845459454604546145462454634546445465454664546745468454694547045471454724547345474454754547645477454784547945480454814548245483454844548545486454874548845489454904549145492454934549445495454964549745498454994550045501455024550345504455054550645507455084550945510455114551245513455144551545516455174551845519455204552145522455234552445525455264552745528455294553045531455324553345534455354553645537455384553945540455414554245543455444554545546455474554845549455504555145552455534555445555455564555745558455594556045561455624556345564455654556645567455684556945570455714557245573455744557545576455774557845579455804558145582455834558445585455864558745588455894559045591455924
559345594455954559645597455984559945600456014560245603456044560545606456074560845609456104561145612456134561445615456164561745618456194562045621456224562345624456254562645627456284562945630456314563245633456344563545636456374563845639456404564145642456434564445645456464564745648456494565045651456524565345654456554565645657456584565945660456614566245663456644566545666456674566845669456704567145672456734567445675456764567745678456794568045681456824568345684456854568645687456884568945690456914569245693456944569545696456974569845699457004570145702457034570445705457064570745708457094571045711457124571345714457154571645717457184571945720457214572245723457244572545726457274572845729457304573145732457334573445735457364573745738457394574045741457424574345744457454574645747457484574945750457514575245753457544575545756457574575845759457604576145762457634576445765457664576745768457694577045771457724577345774457754577645777457784577945780457814578245783457844578545786457874578845789457904579145792457934579445795457964579745798457994580045801458024580345804458054580645807458084580945810458114581245813458144581545816458174581845819458204582145822458234582445825458264582745828458294583045831458324583345834458354583645837458384583945840458414584245843458444584545846458474584845849458504585145852458534585445855458564585745858458594586045861458624586345864458654586645867458684586945870458714587245873458744587545876458774587845879458804588145882458834588445885458864588745888458894589045891458924589345894458954589645897458984589945900459014590245903459044590545906459074590845909459104591145912459134591445915459164591745918459194592045921459224592345924459254592645927459284592945930459314593245933459344593545936459374593845939459404594145942459434594445945459464594745948459494595045951459524595345954459554595645957459584595945960459614596245963459644596545966459674596845969459704597145972459734597445975459764597745978459794598045981459824598345984459854598645987459884598945990459914599245993459944599545996459974599845999460004600146002460034600446005460064600746008460094601046011460124601346014460154601646017460184601946020460214602246023460244602546026460274602846029460304603146032460334603446035460364603746038460394604046041460424604346044460454604646047460484604946050460514605246053460544605546056460574605846059460604606146062460634606446065460664606746068460694607046071460724607346074460754607646077460784607946080460814608246083460844608546086460874608846089460904609146092460934609446095460964609746098460994610046101461024610346104461054610646107461084610946110461114611246113461144611546116461174611846119461204612146122461234612446125461264612746128461294613046131461324613346134461354613646137461384613946140461414614246143461444614546146461474614846149461504615146152461534615446155461564615746158461594616046161461624616346164461654616646167461684616946170461714617246173461744617546176461774617846179461804618146182461834618446185461864618746188461894619046191461924619346194461954619646197461984619946200462014620246203462044620546206462074620846209462104621146212462134621446215462164621746218462194622046221462224622346224462254622646227462284622946230462314623246233462344623546236462374623846239462404624146242462434624446245462464624746248462494625046251462524625346254462554625646257462584625946260462614626246263462644626546266462674626846269462704627146272462734627446275462764627746278462794628046281462824628346284462854628646287462884628946290462914629246293462944629546296462974629846299463004630146302463034
630446305463064630746308463094631046311463124631346314463154631646317463184631946320463214632246323463244632546326463274632846329463304633146332463334633446335463364633746338463394634046341463424634346344463454634646347463484634946350463514635246353463544635546356463574635846359463604636146362463634636446365463664636746368463694637046371463724637346374463754637646377463784637946380463814638246383463844638546386463874638846389463904639146392463934639446395463964639746398463994640046401464024640346404464054640646407464084640946410464114641246413464144641546416464174641846419464204642146422464234642446425464264642746428464294643046431464324643346434464354643646437464384643946440464414644246443464444644546446464474644846449464504645146452464534645446455464564645746458464594646046461464624646346464464654646646467464684646946470464714647246473464744647546476464774647846479464804648146482464834648446485464864648746488464894649046491464924649346494464954649646497464984649946500465014650246503465044650546506465074650846509465104651146512465134651446515465164651746518465194652046521465224652346524465254652646527465284652946530465314653246533465344653546536465374653846539465404654146542465434654446545465464654746548465494655046551465524655346554465554655646557465584655946560465614656246563465644656546566465674656846569465704657146572465734657446575465764657746578465794658046581465824658346584465854658646587465884658946590465914659246593465944659546596465974659846599466004660146602466034660446605466064660746608466094661046611466124661346614466154661646617466184661946620466214662246623466244662546626466274662846629466304663146632466334663446635466364663746638466394664046641466424664346644466454664646647466484664946650466514665246653466544665546656466574665846659466604666146662466634666446665466664666746668466694667046671466724667346674466754667646677466784667946680466814668246683466844668546686466874668846689466904669146692466934669446695466964669746698466994670046701467024670346704467054670646707467084670946710467114671246713467144671546716467174671846719467204672146722467234672446725467264672746728467294673046731467324673346734467354673646737467384673946740467414674246743467444674546746467474674846749467504675146752467534675446755467564675746758467594676046761467624676346764467654676646767467684676946770467714677246773467744677546776467774677846779467804678146782467834678446785467864678746788467894679046791467924679346794467954679646797467984679946800468014680246803468044680546806468074680846809468104681146812468134681446815468164681746818468194682046821468224682346824468254682646827468284682946830468314683246833468344683546836468374683846839468404684146842468434684446845468464684746848468494685046851468524685346854468554685646857468584685946860468614686246863468644686546866468674686846869468704687146872468734687446875468764687746878468794688046881468824688346884468854688646887468884688946890468914689246893468944689546896468974689846899469004690146902469034690446905469064690746908469094691046911469124691346914469154691646917469184691946920469214692246923469244692546926469274692846929469304693146932469334693446935469364693746938469394694046941469424694346944469454694646947469484694946950469514695246953469544695546956469574695846959469604696146962469634696446965469664696746968469694697046971469724697346974469754697646977469784697946980469814698246983469844698546986469874698846989469904699146992469934699446995469964699746998469994700047001470024700347004470054700647007470084700947010470114701247013470144
981359814598155981659817598185981959820598215982259823598245982559826598275982859829598305983159832598335983459835598365983759838598395984059841598425984359844598455984659847598485984959850598515985259853598545985559856598575985859859598605986159862598635986459865598665986759868598695987059871598725987359874598755987659877598785987959880598815988259883598845988559886598875988859889598905989159892598935989459895598965989759898598995990059901599025990359904599055990659907599085990959910599115991259913599145991559916599175991859919599205992159922599235992459925599265992759928599295993059931599325993359934599355993659937599385993959940599415994259943599445994559946599475994859949599505995159952599535995459955599565995759958599595996059961599625996359964599655996659967599685996959970599715997259973599745997559976599775997859979599805998159982599835998459985599865998759988599895999059991599925999359994599955999659997599985999960000600016000260003600046000560006600076000860009600106001160012600136001460015600166001760018600196002060021600226002360024600256002660027600286002960030600316003260033600346003560036600376003860039600406004160042600436004460045600466004760048600496005060051600526005360054600556005660057600586005960060600616006260063600646006560066600676006860069600706007160072600736007460075600766007760078600796008060081600826008360084600856008660087600886008960090600916009260093600946009560096600976009860099601006010160102601036010460105601066010760108601096011060111601126011360114601156011660117601186011960120601216012260123601246012560126601276012860129601306013160132601336013460135601366013760138601396014060141601426014360144601456014660147601486014960150601516015260153601546015560156601576015860159601606016160162601636016460165601666016760168601696017060171601726017360174601756017660177601786017960180601816018260183601846018560186601876018860189601906019160192601936019460195601966019760198601996020060201602026020360204602056020660207602086020960210602116021260213602146021560216602176021860219602206022160222602236022460225602266022760228602296023060231602326023360234602356023660237602386023960240602416024260243602446024560246602476024860249602506025160252602536025460255602566025760258602596026060261602626026360264602656026660267602686026960270602716027260273602746027560276602776027860279602806028160282602836028460285602866028760288602896029060291602926029360294602956029660297602986029960300603016030260303603046030560306603076030860309603106031160312603136031460315603166031760318603196032060321603226032360324603256032660327603286032960330603316033260333603346033560336603376033860339603406034160342603436034460345603466034760348603496035060351603526035360354603556035660357603586035960360603616036260363603646036560366603676036860369603706037160372603736037460375603766037760378603796038060381603826038360384603856038660387603886038960390603916039260393603946039560396603976039860399604006040160402604036040460405604066040760408604096041060411604126041360414604156041660417604186041960420604216042260423604246042560426604276042860429604306043160432604336043460435604366043760438604396044060441604426044360444604456044660447604486044960450604516045260453604546045560456604576045860459604606046160462604636046460465604666046760468604696047060471604726047360474604756047660477604786047960480604816048260483604846048560486604876048860489604906049160492604936049460495604966049760498604996050060501605026050360504605056050660507605086050960510605116051260513605146051560516605176051860519605206052160522605236
052460525605266052760528605296053060531605326053360534605356053660537605386053960540605416054260543605446054560546605476054860549605506055160552605536055460555605566055760558605596056060561605626056360564605656056660567605686056960570605716057260573605746057560576605776057860579605806058160582605836058460585605866058760588605896059060591605926059360594605956059660597605986059960600606016060260603606046060560606606076060860609606106061160612606136061460615606166061760618606196062060621606226062360624606256062660627606286062960630606316063260633606346063560636606376063860639606406064160642606436064460645606466064760648606496065060651606526065360654606556065660657606586065960660606616066260663606646066560666606676066860669606706067160672606736067460675606766067760678606796068060681606826068360684606856068660687606886068960690606916069260693606946069560696606976069860699607006070160702607036070460705607066070760708607096071060711607126071360714607156071660717607186071960720607216072260723607246072560726607276072860729607306073160732607336073460735607366073760738607396074060741607426074360744607456074660747607486074960750607516075260753607546075560756607576075860759607606076160762607636076460765607666076760768607696077060771607726077360774607756077660777607786077960780607816078260783607846078560786607876078860789607906079160792607936079460795607966079760798607996080060801608026080360804608056080660807608086080960810608116081260813608146081560816608176081860819608206082160822608236082460825608266082760828608296083060831608326083360834608356083660837608386083960840608416084260843608446084560846608476084860849608506085160852608536085460855608566085760858608596086060861608626086360864608656086660867608686086960870608716087260873608746087560876608776087860879608806088160882608836088460885608866088760888608896089060891608926089360894608956089660897608986089960900609016090260903609046090560906609076090860909609106091160912609136091460915609166091760918609196092060921609226092360924609256092660927609286092960930609316093260933609346093560936609376093860939609406094160942609436094460945609466094760948609496095060951609526095360954609556095660957609586095960960609616096260963609646096560966609676096860969609706097160972609736097460975609766097760978609796098060981609826098360984609856098660987609886098960990609916099260993609946099560996609976099860999610006100161002610036100461005610066100761008610096101061011610126101361014610156101661017610186101961020610216102261023610246102561026610276102861029610306103161032610336103461035610366103761038610396104061041610426104361044610456104661047610486104961050610516105261053610546105561056610576105861059610606106161062610636106461065610666106761068610696107061071610726107361074610756107661077610786107961080610816108261083610846108561086610876108861089610906109161092610936109461095610966109761098610996110061101611026110361104611056110661107611086110961110611116111261113611146111561116611176111861119611206112161122611236112461125611266112761128611296113061131611326113361134611356113661137611386113961140611416114261143611446114561146611476114861149611506115161152611536115461155611566115761158611596116061161611626116361164611656116661167611686116961170611716117261173611746117561176611776117861179611806118161182611836118461185611866118761188611896119061191611926119361194611956119661197611986119961200612016120261203612046120561206612076120861209612106121161212612136121461215612166121761218612196122061221612226122361224612256122661227612286122961230612316123261233612346
123561236612376123861239612406124161242612436124461245612466124761248612496125061251612526125361254612556125661257612586125961260612616126261263612646126561266612676126861269612706127161272612736127461275612766127761278612796128061281612826128361284612856128661287612886128961290612916129261293612946129561296612976129861299613006130161302613036130461305613066130761308613096131061311613126131361314613156131661317613186131961320613216132261323613246132561326613276132861329613306133161332613336133461335613366133761338613396134061341613426134361344613456134661347613486134961350613516135261353613546135561356613576135861359613606136161362613636136461365613666136761368613696137061371613726137361374613756137661377613786137961380613816138261383613846138561386613876138861389613906139161392613936139461395613966139761398613996140061401614026140361404614056140661407614086140961410614116141261413614146141561416614176141861419614206142161422614236142461425614266142761428614296143061431614326143361434614356143661437614386143961440614416144261443614446144561446614476144861449614506145161452614536145461455614566145761458614596146061461614626146361464614656146661467614686146961470614716147261473614746147561476614776147861479614806148161482614836148461485614866148761488614896149061491614926149361494614956149661497614986149961500615016150261503615046150561506615076150861509615106151161512615136151461515615166151761518615196152061521615226152361524615256152661527615286152961530615316153261533615346153561536615376153861539615406154161542615436154461545615466154761548615496155061551615526155361554615556155661557615586155961560615616156261563615646156561566615676156861569615706157161572615736157461575615766157761578615796158061581615826158361584615856158661587615886158961590615916159261593615946159561596615976159861599616006160161602616036160461605616066160761608616096161061611616126161361614616156161661617616186161961620616216162261623616246162561626616276162861629616306163161632616336163461635616366163761638616396164061641616426164361644616456164661647616486164961650616516165261653616546165561656616576165861659616606166161662616636166461665616666166761668616696167061671616726167361674616756167661677616786167961680616816168261683616846168561686616876168861689616906169161692616936169461695616966169761698616996170061701617026170361704617056170661707617086170961710617116171261713617146171561716617176171861719617206172161722617236172461725617266172761728617296173061731617326173361734617356173661737617386173961740617416174261743617446174561746617476174861749617506175161752617536175461755617566175761758617596176061761617626176361764617656176661767617686176961770617716177261773617746177561776617776177861779617806178161782617836178461785617866178761788617896179061791617926179361794617956179661797617986179961800618016180261803618046180561806618076180861809618106181161812618136181461815618166181761818618196182061821618226182361824618256182661827618286182961830618316183261833618346183561836618376183861839618406184161842618436184461845618466184761848618496185061851618526185361854618556185661857618586185961860618616186261863618646186561866618676186861869618706187161872618736187461875618766187761878618796188061881618826188361884618856188661887618886188961890618916189261893618946189561896618976189861899619006190161902619036190461905619066190761908619096191061911619126191361914619156191661917619186191961920619216192261923619246192561926619276192861929619306193161932619336193461935619366193761938619396194061941619426194361944619456
194661947619486194961950619516195261953619546195561956619576195861959619606196161962619636196461965619666196761968619696197061971619726197361974619756197661977619786197961980619816198261983619846198561986619876198861989619906199161992619936199461995619966199761998619996200062001620026200362004620056200662007620086200962010620116201262013620146201562016620176201862019620206202162022620236202462025620266202762028620296203062031620326203362034620356203662037620386203962040620416204262043620446204562046620476204862049620506205162052620536205462055620566205762058620596206062061620626206362064620656206662067620686206962070620716207262073620746207562076620776207862079620806208162082620836208462085620866208762088620896209062091620926209362094620956209662097620986209962100621016210262103621046210562106621076210862109621106211162112621136211462115621166211762118621196212062121621226212362124621256212662127621286212962130621316213262133621346213562136621376213862139621406214162142621436214462145621466214762148621496215062151621526215362154621556215662157621586215962160621616216262163621646216562166621676216862169621706217162172621736217462175621766217762178621796218062181621826218362184621856218662187621886218962190621916219262193621946219562196621976219862199622006220162202622036220462205622066220762208622096221062211622126221362214622156221662217622186221962220622216222262223622246222562226622276222862229622306223162232622336223462235622366223762238622396224062241622426224362244622456224662247622486224962250622516225262253622546225562256622576225862259622606226162262622636226462265622666226762268622696227062271622726227362274622756227662277622786227962280622816228262283622846228562286622876228862289622906229162292622936229462295622966229762298622996230062301623026230362304623056230662307623086230962310623116231262313623146231562316623176231862319623206232162322623236232462325623266232762328623296233062331623326233362334623356233662337623386233962340623416234262343623446234562346623476234862349623506235162352623536235462355623566235762358623596236062361623626236362364623656236662367623686236962370623716237262373623746237562376623776237862379623806238162382623836238462385623866238762388623896239062391623926239362394623956239662397623986239962400624016240262403624046240562406624076240862409624106241162412624136241462415624166241762418624196242062421624226242362424624256242662427624286242962430624316243262433624346243562436624376243862439624406244162442624436244462445624466244762448624496245062451624526245362454624556245662457624586245962460624616246262463624646246562466624676246862469624706247162472624736247462475624766247762478624796248062481624826248362484624856248662487624886248962490624916249262493624946249562496624976249862499625006250162502625036250462505625066250762508625096251062511625126251362514625156251662517625186251962520625216252262523625246252562526625276252862529625306253162532625336253462535625366253762538625396254062541625426254362544625456254662547625486254962550625516255262553625546255562556625576255862559625606256162562625636256462565625666256762568625696257062571625726257362574625756257662577625786257962580625816258262583625846258562586625876258862589625906259162592625936259462595625966259762598625996260062601626026260362604626056260662607626086260962610626116261262613626146261562616626176261862619626206262162622626236262462625626266262762628626296263062631626326263362634626356263662637626386263962640626416264262643626446264562646626476264862649626506265162652626536265462655626566
265762658626596266062661626626266362664626656266662667626686266962670626716267262673626746267562676626776267862679626806268162682626836268462685626866268762688626896269062691626926269362694626956269662697626986269962700627016270262703627046270562706627076270862709627106271162712627136271462715627166271762718627196272062721627226272362724627256272662727627286272962730627316273262733627346273562736627376273862739627406274162742627436274462745627466274762748627496275062751627526275362754627556275662757627586275962760627616276262763627646276562766627676276862769627706277162772627736277462775627766277762778627796278062781627826278362784627856278662787627886278962790627916279262793627946279562796627976279862799628006280162802628036280462805628066280762808628096281062811628126281362814628156281662817628186281962820628216282262823628246282562826628276282862829628306283162832628336283462835628366283762838628396284062841628426284362844628456284662847628486284962850628516285262853628546285562856628576285862859628606286162862628636286462865628666286762868628696287062871628726287362874628756287662877628786287962880628816288262883628846288562886628876288862889628906289162892628936289462895628966289762898628996290062901629026290362904629056290662907629086290962910629116291262913629146291562916629176291862919629206292162922629236292462925629266292762928629296293062931629326293362934629356293662937629386293962940629416294262943629446294562946629476294862949629506295162952629536295462955629566295762958629596296062961629626296362964629656296662967629686296962970629716297262973629746297562976629776297862979629806298162982629836298462985629866298762988629896299062991629926299362994629956299662997629986299963000630016300263003630046300563006630076300863009630106301163012630136301463015630166301763018630196302063021630226302363024630256302663027630286302963030630316303263033630346303563036630376303863039630406304163042630436304463045630466304763048630496305063051630526305363054630556305663057630586305963060630616306263063630646306563066630676306863069630706307163072630736307463075630766307763078630796308063081630826308363084630856308663087630886308963090630916309263093630946309563096630976309863099631006310163102631036310463105631066310763108631096311063111631126311363114631156311663117631186311963120631216312263123631246312563126631276312863129631306313163132631336313463135631366313763138631396314063141631426314363144631456314663147631486314963150631516315263153631546315563156631576315863159631606316163162631636316463165631666316763168631696317063171631726317363174631756317663177631786317963180631816318263183631846318563186631876318863189631906319163192631936319463195631966319763198631996320063201632026320363204632056320663207632086320963210632116321263213632146321563216632176321863219632206322163222632236322463225632266322763228632296323063231632326323363234632356323663237632386323963240632416324263243632446324563246632476324863249632506325163252632536325463255632566325763258632596326063261632626326363264632656326663267632686326963270632716327263273632746327563276632776327863279632806328163282632836328463285632866328763288632896329063291632926329363294632956329663297632986329963300633016330263303633046330563306633076330863309633106331163312633136331463315633166331763318633196332063321633226332363324633256332663327633286332963330633316333263333633346333563336633376333863339633406334163342633436334463345633466334763348633496335063351633526335363354633556335663357633586335963360633616336263363633646336563366633676
336863369633706337163372633736337463375633766337763378633796338063381633826338363384633856338663387633886338963390633916339263393633946339563396633976339863399634006340163402634036340463405634066340763408634096341063411634126341363414634156341663417634186341963420634216342263423634246342563426634276342863429634306343163432634336343463435634366343763438634396344063441634426344363444634456344663447634486344963450634516345263453634546345563456634576345863459634606346163462634636346463465634666346763468634696347063471634726347363474634756347663477634786347963480634816348263483634846348563486634876348863489634906349163492634936349463495634966349763498634996350063501635026350363504635056350663507635086350963510635116351263513635146351563516635176351863519635206352163522635236352463525635266352763528635296353063531635326353363534635356353663537635386353963540635416354263543635446354563546635476354863549635506355163552635536355463555635566355763558635596356063561635626356363564635656356663567635686356963570635716357263573635746357563576635776357863579635806358163582635836358463585635866358763588635896359063591635926359363594635956359663597635986359963600636016360263603636046360563606636076360863609636106361163612636136361463615636166361763618636196362063621636226362363624636256362663627636286362963630636316363263633636346363563636636376363863639636406364163642636436364463645636466364763648636496365063651636526365363654636556365663657636586365963660636616366263663636646366563666636676366863669636706367163672636736367463675636766367763678636796368063681636826368363684636856368663687636886368963690636916369263693636946369563696636976369863699637006370163702637036370463705637066370763708637096371063711637126371363714637156371663717637186371963720637216372263723637246372563726637276372863729637306373163732637336373463735637366373763738637396374063741637426374363744637456374663747637486374963750637516375263753637546375563756637576375863759637606376163762637636376463765637666376763768637696377063771637726377363774637756377663777637786377963780637816378263783637846378563786637876378863789637906379163792637936379463795637966379763798637996380063801638026380363804638056380663807638086380963810638116381263813638146381563816638176381863819638206382163822638236382463825638266382763828638296383063831638326383363834638356383663837638386383963840638416384263843638446384563846638476384863849638506385163852638536385463855638566385763858638596386063861638626386363864638656386663867638686386963870638716387263873638746387563876638776387863879638806388163882638836388463885638866388763888638896389063891638926389363894638956389663897638986389963900639016390263903639046390563906639076390863909639106391163912639136391463915639166391763918639196392063921639226392363924639256392663927639286392963930639316393263933639346393563936639376393863939639406394163942639436394463945639466394763948639496395063951639526395363954639556395663957639586395963960639616396263963639646396563966639676396863969639706397163972639736397463975639766397763978639796398063981639826398363984639856398663987639886398963990639916399263993639946399563996639976399863999640006400164002640036400464005640066400764008640096401064011640126401364014640156401664017640186401964020640216402264023640246402564026640276402864029640306403164032640336403464035640366403764038640396404064041640426404364044640456404664047640486404964050640516405264053640546405564056640576405864059640606406164062640636406464065640666406764068640696407064071640726407364074640756407664077640786
407964080640816408264083640846408564086640876408864089640906409164092640936409464095640966409764098640996410064101641026410364104641056410664107641086410964110641116411264113641146411564116641176411864119641206412164122641236412464125641266412764128641296413064131641326413364134641356413664137641386413964140641416414264143641446414564146641476414864149641506415164152641536415464155641566415764158641596416064161641626416364164641656416664167641686416964170641716417264173641746417564176641776417864179641806418164182641836418464185641866418764188641896419064191641926419364194641956419664197641986419964200642016420264203642046420564206642076420864209642106421164212642136421464215642166421764218642196422064221642226422364224642256422664227642286422964230642316423264233642346423564236642376423864239642406424164242642436424464245642466424764248642496425064251642526425364254642556425664257642586425964260642616426264263642646426564266642676426864269642706427164272642736427464275642766427764278642796428064281642826428364284642856428664287642886428964290642916429264293642946429564296642976429864299643006430164302643036430464305643066430764308643096431064311643126431364314643156431664317643186431964320643216432264323643246432564326643276432864329643306433164332643336433464335643366433764338643396434064341643426434364344643456434664347643486434964350643516435264353643546435564356643576435864359643606436164362643636436464365643666436764368643696437064371643726437364374643756437664377643786437964380643816438264383643846438564386643876438864389643906439164392643936439464395643966439764398643996440064401644026440364404644056440664407644086440964410644116441264413644146441564416644176441864419644206442164422644236442464425644266442764428644296443064431644326443364434644356443664437644386443964440644416444264443644446444564446644476444864449644506445164452644536445464455644566445764458644596446064461644626446364464644656446664467644686446964470644716447264473644746447564476644776447864479644806448164482644836448464485644866448764488644896449064491644926449364494644956449664497644986449964500645016450264503645046450564506645076450864509645106451164512645136451464515645166451764518645196452064521645226452364524645256452664527645286452964530645316453264533645346453564536645376453864539645406454164542645436454464545645466454764548645496455064551645526455364554645556455664557645586455964560645616456264563645646456564566645676456864569645706457164572645736457464575645766457764578645796458064581645826458364584645856458664587645886458964590645916459264593645946459564596645976459864599646006460164602646036460464605646066460764608646096461064611646126461364614646156461664617646186461964620646216462264623646246462564626646276462864629646306463164632646336463464635646366463764638646396464064641646426464364644646456464664647646486464964650646516465264653646546465564656646576465864659646606466164662646636466464665646666466764668646696467064671646726467364674646756467664677646786467964680646816468264683646846468564686646876468864689646906469164692646936469464695646966469764698646996470064701647026470364704647056470664707647086470964710647116471264713647146471564716647176471864719647206472164722647236472464725647266472764728647296473064731647326473364734647356473664737647386473964740647416474264743647446474564746647476474864749647506475164752647536475464755647566475764758647596476064761647626476364764647656476664767647686476964770647716477264773647746477564776647776477864779647806478164782647836478464785647866478764788647896
479064791647926479364794647956479664797647986479964800648016480264803648046480564806648076480864809648106481164812648136481464815648166481764818648196482064821648226482364824648256482664827648286482964830648316483264833648346483564836648376483864839648406484164842648436484464845648466484764848648496485064851648526485364854648556485664857648586485964860648616486264863648646486564866648676486864869648706487164872648736487464875648766487764878648796488064881648826488364884648856488664887648886488964890648916489264893648946489564896648976489864899649006490164902649036490464905649066490764908649096491064911649126491364914649156491664917649186491964920649216492264923649246492564926649276492864929649306493164932649336493464935649366493764938649396494064941649426494364944649456494664947649486494964950649516495264953649546495564956649576495864959649606496164962649636496464965649666496764968649696497064971649726497364974649756497664977649786497964980649816498264983649846498564986649876498864989649906499164992649936499464995649966499764998649996500065001650026500365004650056500665007650086500965010650116501265013650146501565016650176501865019650206502165022650236502465025650266502765028650296503065031650326503365034650356503665037650386503965040650416504265043650446504565046650476504865049650506505165052650536505465055650566505765058650596506065061650626506365064650656506665067650686506965070650716507265073650746507565076650776507865079650806508165082650836508465085650866508765088650896509065091650926509365094650956509665097650986509965100651016510265103651046510565106651076510865109651106511165112651136511465115651166511765118651196512065121651226512365124651256512665127651286512965130651316513265133651346513565136651376513865139651406514165142651436514465145651466514765148651496515065151651526515365154651556515665157651586515965160651616516265163651646516565166651676516865169651706517165172651736517465175651766517765178651796518065181651826518365184651856518665187651886518965190651916519265193651946519565196651976519865199652006520165202652036520465205652066520765208652096521065211652126521365214652156521665217652186521965220652216522265223652246522565226652276522865229652306523165232652336523465235652366523765238652396524065241652426524365244652456524665247652486524965250652516525265253652546525565256652576525865259652606526165262652636526465265652666526765268652696527065271652726527365274652756527665277652786527965280652816528265283652846528565286652876528865289652906529165292652936529465295652966529765298652996530065301653026530365304653056530665307653086530965310653116531265313653146531565316653176531865319653206532165322653236532465325653266532765328653296533065331653326533365334653356533665337653386533965340653416534265343653446534565346653476534865349653506535165352653536535465355653566535765358653596536065361653626536365364653656536665367653686536965370653716537265373653746537565376653776537865379653806538165382653836538465385653866538765388653896539065391653926539365394653956539665397653986539965400654016540265403654046540565406654076540865409654106541165412654136541465415654166541765418654196542065421654226542365424654256542665427654286542965430654316543265433654346543565436654376543865439654406544165442654436544465445654466544765448654496545065451654526545365454654556545665457654586545965460654616546265463654646546565466654676546865469654706547165472654736547465475654766547765478654796548065481654826548365484654856548665487654886548965490654916549265493654946549565496654976549865499655006
550165502655036550465505655066550765508655096551065511655126551365514655156551665517655186551965520655216552265523655246552565526655276552865529655306553165532655336553465535655366553765538655396554065541655426554365544655456554665547655486554965550655516555265553655546555565556655576555865559655606556165562655636556465565655666556765568655696557065571655726557365574655756557665577655786557965580655816558265583655846558565586655876558865589655906559165592655936559465595655966559765598655996560065601656026560365604656056560665607656086560965610656116561265613656146561565616656176561865619656206562165622656236562465625656266562765628656296563065631656326563365634656356563665637656386563965640656416564265643656446564565646656476564865649656506565165652656536565465655656566565765658656596566065661656626566365664656656566665667656686566965670656716567265673656746567565676656776567865679656806568165682656836568465685656866568765688656896569065691656926569365694656956569665697656986569965700657016570265703657046570565706657076570865709657106571165712657136571465715657166571765718657196572065721657226572365724657256572665727657286572965730657316573265733657346573565736657376573865739657406574165742657436574465745657466574765748657496575065751657526575365754657556575665757657586575965760657616576265763657646576565766657676576865769657706577165772657736577465775657766577765778657796578065781657826578365784657856578665787657886578965790657916579265793657946579565796657976579865799658006580165802658036580465805658066580765808658096581065811658126581365814658156581665817658186581965820658216582265823658246582565826658276582865829658306583165832658336583465835658366583765838658396584065841658426584365844658456584665847658486584965850658516585265853658546585565856658576585865859658606586165862658636586465865658666586765868658696587065871658726587365874658756587665877658786587965880658816588265883658846588565886658876588865889658906589165892658936589465895658966589765898658996590065901659026590365904659056590665907659086590965910659116591265913659146591565916659176591865919659206592165922659236592465925659266592765928659296593065931659326593365934659356593665937659386593965940659416594265943659446594565946659476594865949659506595165952659536595465955659566595765958659596596065961659626596365964659656596665967659686596965970659716597265973659746597565976659776597865979659806598165982659836598465985659866598765988659896599065991659926599365994659956599665997659986599966000660016600266003660046600566006660076600866009660106601166012660136601466015660166601766018660196602066021660226602366024660256602666027660286602966030660316603266033660346603566036660376603866039660406604166042660436604466045660466604766048660496605066051660526605366054660556605666057660586605966060660616606266063660646606566066660676606866069660706607166072660736607466075660766607766078660796608066081660826608366084660856608666087660886608966090660916609266093660946609566096660976609866099661006610166102661036610466105661066610766108661096611066111661126611366114661156611666117661186611966120661216612266123661246612566126661276612866129661306613166132661336613466135661366613766138661396614066141661426614366144661456614666147661486614966150661516615266153661546615566156661576615866159661606616166162661636616466165661666616766168661696617066171661726617366174661756617666177661786617966180661816618266183661846618566186661876618866189661906619166192661936619466195661966619766198661996620066201662026620366204662056620666207662086620966210662116
621266213662146621566216662176621866219662206622166222662236622466225662266622766228662296623066231662326623366234662356623666237662386623966240662416624266243662446624566246662476624866249662506625166252662536625466255662566625766258662596626066261662626626366264662656626666267662686626966270662716627266273662746627566276662776627866279662806628166282662836628466285662866628766288662896629066291662926629366294662956629666297662986629966300663016630266303663046630566306663076630866309663106631166312663136631466315663166631766318663196632066321663226632366324663256632666327663286632966330663316633266333663346633566336663376633866339663406634166342663436634466345663466634766348663496635066351663526635366354663556635666357663586635966360663616636266363663646636566366663676636866369663706637166372663736637466375663766637766378663796638066381663826638366384663856638666387663886638966390663916639266393663946639566396663976639866399664006640166402664036640466405664066640766408664096641066411664126641366414664156641666417664186641966420664216642266423664246642566426664276642866429664306643166432664336643466435664366643766438664396644066441664426644366444664456644666447664486644966450664516645266453664546645566456664576645866459664606646166462664636646466465664666646766468664696647066471664726647366474664756647666477664786647966480664816648266483664846648566486664876648866489664906649166492664936649466495664966649766498664996650066501665026650366504665056650666507665086650966510665116651266513665146651566516665176651866519665206652166522665236652466525665266652766528665296653066531665326653366534665356653666537665386653966540665416654266543665446654566546665476654866549665506655166552665536655466555665566655766558665596656066561665626656366564665656656666567665686656966570665716657266573665746657566576665776657866579665806658166582665836658466585665866658766588665896659066591665926659366594665956659666597665986659966600666016660266603666046660566606666076660866609666106661166612666136661466615666166661766618666196662066621666226662366624666256662666627666286662966630666316663266633666346663566636666376663866639666406664166642666436664466645666466664766648666496665066651666526665366654666556665666657666586665966660666616666266663666646666566666666676666866669666706667166672666736667466675666766667766678666796668066681666826668366684666856668666687666886668966690666916669266693666946669566696666976669866699667006670166702667036670466705667066670766708667096671066711667126671366714667156671666717667186671966720667216672266723667246672566726667276672866729667306673166732667336673466735667366673766738667396674066741667426674366744667456674666747667486674966750667516675266753667546675566756667576675866759667606676166762667636676466765667666676766768667696677066771667726677366774667756677666777667786677966780667816678266783667846678566786667876678866789667906679166792667936679466795667966679766798667996680066801668026680366804668056680666807668086680966810668116681266813668146681566816668176681866819668206682166822668236682466825668266682766828668296683066831668326683366834668356683666837668386683966840668416684266843668446684566846668476684866849668506685166852668536685466855668566685766858668596686066861668626686366864668656686666867668686686966870668716687266873668746687566876668776687866879668806688166882668836688466885668866688766888668896689066891668926689366894668956689666897668986689966900669016690266903669046690566906669076690866909669106691166912669136691466915669166691766918669196692066921669226
692366924669256692666927669286692966930669316693266933669346693566936669376693866939669406694166942669436694466945669466694766948669496695066951669526695366954669556695666957669586695966960669616696266963669646696566966669676696866969669706697166972669736697466975669766697766978669796698066981669826698366984669856698666987669886698966990669916699266993669946699566996669976699866999670006700167002670036700467005670066700767008670096701067011670126701367014670156701667017670186701967020670216702267023670246702567026670276702867029670306703167032670336703467035670366703767038670396704067041670426704367044670456704667047670486704967050670516705267053670546705567056670576705867059670606706167062670636706467065670666706767068670696707067071670726707367074670756707667077670786707967080670816708267083670846708567086670876708867089670906709167092670936709467095670966709767098670996710067101671026710367104671056710667107671086710967110671116711267113671146711567116671176711867119671206712167122671236712467125671266712767128671296713067131671326713367134671356713667137671386713967140671416714267143671446714567146671476714867149671506715167152671536715467155671566715767158671596716067161671626716367164671656716667167671686716967170671716717267173671746717567176671776717867179671806718167182671836718467185671866718767188671896719067191671926719367194671956719667197671986719967200672016720267203672046720567206672076720867209672106721167212672136721467215672166721767218672196722067221672226722367224672256722667227672286722967230672316723267233672346723567236672376723867239672406724167242672436724467245672466724767248672496725067251672526725367254672556725667257672586725967260672616726267263672646726567266672676726867269672706727167272672736727467275672766727767278672796728067281672826728367284672856728667287672886728967290672916729267293672946729567296672976729867299673006730167302673036730467305673066730767308673096731067311673126731367314673156731667317673186731967320673216732267323673246732567326673276732867329673306733167332673336733467335673366733767338673396734067341673426734367344673456734667347673486734967350673516735267353673546735567356673576735867359673606736167362673636736467365673666736767368673696737067371673726737367374673756737667377673786737967380673816738267383673846738567386673876738867389673906739167392673936739467395673966739767398673996740067401674026740367404674056740667407674086740967410674116741267413674146741567416674176741867419674206742167422674236742467425674266742767428674296743067431674326743367434674356743667437674386743967440674416744267443674446744567446674476744867449674506745167452674536745467455674566745767458674596746067461674626746367464674656746667467674686746967470674716747267473674746747567476674776747867479674806748167482674836748467485674866748767488674896749067491674926749367494674956749667497674986749967500675016750267503675046750567506675076750867509675106751167512675136751467515675166751767518675196752067521675226752367524675256752667527675286752967530675316753267533675346753567536675376753867539675406754167542675436754467545675466754767548675496755067551675526755367554675556755667557675586755967560675616756267563675646756567566675676756867569675706757167572675736757467575675766757767578675796758067581675826758367584675856758667587675886758967590675916759267593675946759567596675976759867599676006760167602676036760467605676066760767608676096761067611676126761367614676156761667617676186761967620676216762267623676246762567626676276762867629676306763167632676336
763467635676366763767638676396764067641676426764367644676456764667647676486764967650676516765267653676546765567656676576765867659676606766167662676636766467665676666766767668676696767067671676726767367674676756767667677676786767967680676816768267683676846768567686676876768867689676906769167692676936769467695676966769767698676996770067701677026770367704677056770667707677086770967710677116771267713677146771567716677176771867719677206772167722677236772467725677266772767728677296773067731677326773367734677356773667737677386773967740677416774267743677446774567746677476774867749677506775167752677536775467755677566775767758677596776067761677626776367764677656776667767677686776967770677716777267773677746777567776677776777867779677806778167782677836778467785677866778767788677896779067791677926779367794677956779667797677986779967800678016780267803678046780567806678076780867809678106781167812678136781467815678166781767818678196782067821678226782367824678256782667827678286782967830678316783267833678346783567836678376783867839678406784167842678436784467845678466784767848678496785067851678526785367854678556785667857678586785967860678616786267863678646786567866678676786867869678706787167872678736787467875678766787767878678796788067881678826788367884678856788667887678886788967890678916789267893678946789567896678976789867899679006790167902679036790467905679066790767908679096791067911679126791367914679156791667917679186791967920679216792267923679246792567926679276792867929679306793167932679336793467935679366793767938679396794067941679426794367944679456794667947679486794967950679516795267953679546795567956679576795867959679606796167962679636796467965679666796767968679696797067971679726797367974679756797667977679786797967980679816798267983679846798567986679876798867989679906799167992679936799467995679966799767998679996800068001680026800368004680056800668007680086800968010680116801268013680146801568016680176801868019680206802168022680236802468025680266802768028680296803068031680326803368034680356803668037680386803968040680416804268043680446804568046680476804868049680506805168052680536805468055680566805768058680596806068061680626806368064680656806668067680686806968070680716807268073680746807568076680776807868079680806808168082680836808468085680866808768088680896809068091680926809368094680956809668097680986809968100681016810268103681046810568106681076810868109681106811168112681136811468115681166811768118681196812068121681226812368124681256812668127681286812968130681316813268133681346813568136681376813868139681406814168142681436814468145681466814768148681496815068151681526815368154681556815668157681586815968160681616816268163681646816568166681676816868169681706817168172681736817468175681766817768178681796818068181681826818368184681856818668187681886818968190681916819268193681946819568196681976819868199682006820168202682036820468205682066820768208682096821068211682126821368214682156821668217682186821968220682216822268223682246822568226682276822868229682306823168232682336823468235682366823768238682396824068241682426824368244682456824668247682486824968250682516825268253682546825568256682576825868259682606826168262682636826468265682666826768268682696827068271682726827368274682756827668277682786827968280682816828268283682846828568286682876828868289682906829168292682936829468295682966829768298682996830068301683026830368304683056830668307683086830968310683116831268313683146831568316683176831868319683206832168322683236832468325683266832768328683296833068331683326833368334683356833668337683386833968340683416834268343683446
834568346683476834868349683506835168352683536835468355683566835768358683596836068361683626836368364683656836668367683686836968370683716837268373683746837568376683776837868379683806838168382683836838468385683866838768388683896839068391683926839368394683956839668397683986839968400684016840268403684046840568406684076840868409684106841168412684136841468415684166841768418684196842068421684226842368424684256842668427684286842968430684316843268433684346843568436684376843868439684406844168442684436844468445684466844768448684496845068451684526845368454684556845668457684586845968460684616846268463684646846568466684676846868469684706847168472684736847468475684766847768478684796848068481684826848368484684856848668487684886848968490684916849268493684946849568496684976849868499685006850168502685036850468505685066850768508685096851068511685126851368514685156851668517685186851968520685216852268523685246852568526685276852868529685306853168532685336853468535685366853768538685396854068541685426854368544685456854668547685486854968550685516855268553685546855568556685576855868559685606856168562685636856468565685666856768568685696857068571685726857368574685756857668577685786857968580685816858268583685846858568586685876858868589685906859168592685936859468595685966859768598685996860068601686026860368604686056860668607686086860968610686116861268613686146861568616686176861868619686206862168622686236862468625686266862768628686296863068631686326863368634686356863668637686386863968640686416864268643686446864568646686476864868649686506865168652686536865468655686566865768658686596866068661686626866368664686656866668667686686866968670686716867268673686746867568676686776867868679686806868168682686836868468685686866868768688686896869068691686926869368694686956869668697686986869968700687016870268703687046870568706687076870868709687106871168712687136871468715687166871768718687196872068721687226872368724687256872668727687286872968730687316873268733687346873568736687376873868739687406874168742687436874468745687466874768748687496875068751687526875368754687556875668757687586875968760687616876268763687646876568766687676876868769687706877168772687736877468775687766877768778687796878068781687826878368784687856878668787687886878968790687916879268793687946879568796687976879868799688006880168802688036880468805688066880768808688096881068811688126881368814688156881668817688186881968820688216882268823688246882568826688276882868829688306883168832688336883468835688366883768838688396884068841688426884368844688456884668847688486884968850688516885268853688546885568856688576885868859688606886168862688636886468865688666886768868688696887068871688726887368874688756887668877688786887968880688816888268883688846888568886688876888868889688906889168892688936889468895688966889768898688996890068901689026890368904689056890668907689086890968910689116891268913689146891568916689176891868919689206892168922689236892468925689266892768928689296893068931689326893368934689356893668937689386893968940689416894268943689446894568946689476894868949689506895168952689536895468955689566895768958689596896068961689626896368964689656896668967689686896968970689716897268973689746897568976689776897868979689806898168982689836898468985689866898768988689896899068991689926899368994689956899668997689986899969000690016900269003690046900569006690076900869009690106901169012690136901469015690166901769018690196902069021690226902369024690256902669027690286902969030690316903269033690346903569036690376903869039690406904169042690436904469045690466904769048690496905069051690526905369054690556
905669057690586905969060690616906269063690646906569066690676906869069690706907169072690736907469075690766907769078690796908069081690826908369084690856908669087690886908969090690916909269093690946909569096690976909869099691006910169102691036910469105691066910769108691096911069111691126911369114691156911669117691186911969120691216912269123691246912569126691276912869129691306913169132691336913469135691366913769138691396914069141691426914369144691456914669147691486914969150691516915269153691546915569156691576915869159691606916169162691636916469165691666916769168691696917069171691726917369174691756917669177691786917969180691816918269183691846918569186691876918869189691906919169192691936919469195691966919769198691996920069201692026920369204692056920669207692086920969210692116921269213692146921569216692176921869219692206922169222692236922469225692266922769228692296923069231692326923369234692356923669237692386923969240692416924269243692446924569246692476924869249692506925169252692536925469255692566925769258692596926069261692626926369264692656926669267692686926969270692716927269273692746927569276692776927869279692806928169282692836928469285692866928769288692896929069291692926929369294692956929669297692986929969300693016930269303693046930569306693076930869309693106931169312693136931469315693166931769318693196932069321693226932369324693256932669327693286932969330693316933269333693346933569336693376933869339693406934169342693436934469345693466934769348693496935069351693526935369354693556935669357693586935969360693616936269363693646936569366693676936869369693706937169372693736937469375693766937769378693796938069381693826938369384693856938669387693886938969390693916939269393693946939569396693976939869399694006940169402694036940469405694066940769408694096941069411694126941369414694156941669417694186941969420694216942269423694246942569426694276942869429694306943169432694336943469435694366943769438694396944069441694426944369444694456944669447694486944969450694516945269453694546945569456694576945869459694606946169462694636946469465694666946769468694696947069471694726947369474694756947669477694786947969480694816948269483694846948569486694876948869489694906949169492694936949469495694966949769498694996950069501695026950369504695056950669507695086950969510695116951269513695146951569516695176951869519695206952169522695236952469525695266952769528695296953069531695326953369534695356953669537695386953969540695416954269543695446954569546695476954869549695506955169552695536955469555695566955769558695596956069561695626956369564695656956669567695686956969570695716957269573695746957569576695776957869579695806958169582695836958469585695866958769588695896959069591695926959369594695956959669597695986959969600696016960269603696046960569606696076960869609696106961169612696136961469615696166961769618696196962069621696226962369624696256962669627696286962969630696316963269633696346963569636696376963869639696406964169642696436964469645696466964769648696496965069651696526965369654696556965669657696586965969660696616966269663696646966569666696676966869669696706967169672696736967469675696766967769678696796968069681696826968369684696856968669687696886968969690696916969269693696946969569696696976969869699697006970169702697036970469705697066970769708697096971069711697126971369714697156971669717697186971969720697216972269723697246972569726697276972869729697306973169732697336973469735697366973769738697396974069741697426974369744697456974669747697486974969750697516975269753697546975569756697576975869759697606976169762697636976469765697666
- /*===---- arm_neon.h - ARM Neon intrinsics ---------------------------------===
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- *
- *===-----------------------------------------------------------------------===
- */
- #ifndef __ARM_NEON_H
- #define __ARM_NEON_H
- #if !defined(__ARM_NEON)
- #error "NEON support not enabled"
- #endif
- #include <stdint.h>
- typedef float float32_t;
- typedef __fp16 float16_t;
- #ifdef __aarch64__
- typedef double float64_t;
- #endif
- #ifdef __aarch64__
- typedef uint8_t poly8_t;
- typedef uint16_t poly16_t;
- typedef uint64_t poly64_t;
- typedef __uint128_t poly128_t;
- #else
- typedef int8_t poly8_t;
- typedef int16_t poly16_t;
- #endif
- typedef __attribute__((neon_vector_type(8))) int8_t int8x8_t;
- typedef __attribute__((neon_vector_type(16))) int8_t int8x16_t;
- typedef __attribute__((neon_vector_type(4))) int16_t int16x4_t;
- typedef __attribute__((neon_vector_type(8))) int16_t int16x8_t;
- typedef __attribute__((neon_vector_type(2))) int32_t int32x2_t;
- typedef __attribute__((neon_vector_type(4))) int32_t int32x4_t;
- typedef __attribute__((neon_vector_type(1))) int64_t int64x1_t;
- typedef __attribute__((neon_vector_type(2))) int64_t int64x2_t;
- typedef __attribute__((neon_vector_type(8))) uint8_t uint8x8_t;
- typedef __attribute__((neon_vector_type(16))) uint8_t uint8x16_t;
- typedef __attribute__((neon_vector_type(4))) uint16_t uint16x4_t;
- typedef __attribute__((neon_vector_type(8))) uint16_t uint16x8_t;
- typedef __attribute__((neon_vector_type(2))) uint32_t uint32x2_t;
- typedef __attribute__((neon_vector_type(4))) uint32_t uint32x4_t;
- typedef __attribute__((neon_vector_type(1))) uint64_t uint64x1_t;
- typedef __attribute__((neon_vector_type(2))) uint64_t uint64x2_t;
- typedef __attribute__((neon_vector_type(4))) float16_t float16x4_t;
- typedef __attribute__((neon_vector_type(8))) float16_t float16x8_t;
- typedef __attribute__((neon_vector_type(2))) float32_t float32x2_t;
- typedef __attribute__((neon_vector_type(4))) float32_t float32x4_t;
- #ifdef __aarch64__
- typedef __attribute__((neon_vector_type(1))) float64_t float64x1_t;
- typedef __attribute__((neon_vector_type(2))) float64_t float64x2_t;
- #endif
- typedef __attribute__((neon_polyvector_type(8))) poly8_t poly8x8_t;
- typedef __attribute__((neon_polyvector_type(16))) poly8_t poly8x16_t;
- typedef __attribute__((neon_polyvector_type(4))) poly16_t poly16x4_t;
- typedef __attribute__((neon_polyvector_type(8))) poly16_t poly16x8_t;
- #ifdef __aarch64__
- typedef __attribute__((neon_polyvector_type(1))) poly64_t poly64x1_t;
- typedef __attribute__((neon_polyvector_type(2))) poly64_t poly64x2_t;
- #endif
- typedef struct int8x8x2_t {
- int8x8_t val[2];
- } int8x8x2_t;
- typedef struct int8x16x2_t {
- int8x16_t val[2];
- } int8x16x2_t;
- typedef struct int16x4x2_t {
- int16x4_t val[2];
- } int16x4x2_t;
- typedef struct int16x8x2_t {
- int16x8_t val[2];
- } int16x8x2_t;
- typedef struct int32x2x2_t {
- int32x2_t val[2];
- } int32x2x2_t;
- typedef struct int32x4x2_t {
- int32x4_t val[2];
- } int32x4x2_t;
- typedef struct int64x1x2_t {
- int64x1_t val[2];
- } int64x1x2_t;
- typedef struct int64x2x2_t {
- int64x2_t val[2];
- } int64x2x2_t;
- typedef struct uint8x8x2_t {
- uint8x8_t val[2];
- } uint8x8x2_t;
- typedef struct uint8x16x2_t {
- uint8x16_t val[2];
- } uint8x16x2_t;
- typedef struct uint16x4x2_t {
- uint16x4_t val[2];
- } uint16x4x2_t;
- typedef struct uint16x8x2_t {
- uint16x8_t val[2];
- } uint16x8x2_t;
- typedef struct uint32x2x2_t {
- uint32x2_t val[2];
- } uint32x2x2_t;
- typedef struct uint32x4x2_t {
- uint32x4_t val[2];
- } uint32x4x2_t;
- typedef struct uint64x1x2_t {
- uint64x1_t val[2];
- } uint64x1x2_t;
- typedef struct uint64x2x2_t {
- uint64x2_t val[2];
- } uint64x2x2_t;
- typedef struct float16x4x2_t {
- float16x4_t val[2];
- } float16x4x2_t;
- typedef struct float16x8x2_t {
- float16x8_t val[2];
- } float16x8x2_t;
- typedef struct float32x2x2_t {
- float32x2_t val[2];
- } float32x2x2_t;
- typedef struct float32x4x2_t {
- float32x4_t val[2];
- } float32x4x2_t;
- #ifdef __aarch64__
- typedef struct float64x1x2_t {
- float64x1_t val[2];
- } float64x1x2_t;
- typedef struct float64x2x2_t {
- float64x2_t val[2];
- } float64x2x2_t;
- #endif
- typedef struct poly8x8x2_t {
- poly8x8_t val[2];
- } poly8x8x2_t;
- typedef struct poly8x16x2_t {
- poly8x16_t val[2];
- } poly8x16x2_t;
- typedef struct poly16x4x2_t {
- poly16x4_t val[2];
- } poly16x4x2_t;
- typedef struct poly16x8x2_t {
- poly16x8_t val[2];
- } poly16x8x2_t;
- #ifdef __aarch64__
- typedef struct poly64x1x2_t {
- poly64x1_t val[2];
- } poly64x1x2_t;
- typedef struct poly64x2x2_t {
- poly64x2_t val[2];
- } poly64x2x2_t;
- #endif
- typedef struct int8x8x3_t {
- int8x8_t val[3];
- } int8x8x3_t;
- typedef struct int8x16x3_t {
- int8x16_t val[3];
- } int8x16x3_t;
- typedef struct int16x4x3_t {
- int16x4_t val[3];
- } int16x4x3_t;
- typedef struct int16x8x3_t {
- int16x8_t val[3];
- } int16x8x3_t;
- typedef struct int32x2x3_t {
- int32x2_t val[3];
- } int32x2x3_t;
- typedef struct int32x4x3_t {
- int32x4_t val[3];
- } int32x4x3_t;
- typedef struct int64x1x3_t {
- int64x1_t val[3];
- } int64x1x3_t;
- typedef struct int64x2x3_t {
- int64x2_t val[3];
- } int64x2x3_t;
- typedef struct uint8x8x3_t {
- uint8x8_t val[3];
- } uint8x8x3_t;
- typedef struct uint8x16x3_t {
- uint8x16_t val[3];
- } uint8x16x3_t;
- typedef struct uint16x4x3_t {
- uint16x4_t val[3];
- } uint16x4x3_t;
- typedef struct uint16x8x3_t {
- uint16x8_t val[3];
- } uint16x8x3_t;
- typedef struct uint32x2x3_t {
- uint32x2_t val[3];
- } uint32x2x3_t;
- typedef struct uint32x4x3_t {
- uint32x4_t val[3];
- } uint32x4x3_t;
- typedef struct uint64x1x3_t {
- uint64x1_t val[3];
- } uint64x1x3_t;
- typedef struct uint64x2x3_t {
- uint64x2_t val[3];
- } uint64x2x3_t;
- typedef struct float16x4x3_t {
- float16x4_t val[3];
- } float16x4x3_t;
- typedef struct float16x8x3_t {
- float16x8_t val[3];
- } float16x8x3_t;
- typedef struct float32x2x3_t {
- float32x2_t val[3];
- } float32x2x3_t;
- typedef struct float32x4x3_t {
- float32x4_t val[3];
- } float32x4x3_t;
- #ifdef __aarch64__
- typedef struct float64x1x3_t {
- float64x1_t val[3];
- } float64x1x3_t;
- typedef struct float64x2x3_t {
- float64x2_t val[3];
- } float64x2x3_t;
- #endif
- typedef struct poly8x8x3_t {
- poly8x8_t val[3];
- } poly8x8x3_t;
- typedef struct poly8x16x3_t {
- poly8x16_t val[3];
- } poly8x16x3_t;
- typedef struct poly16x4x3_t {
- poly16x4_t val[3];
- } poly16x4x3_t;
- typedef struct poly16x8x3_t {
- poly16x8_t val[3];
- } poly16x8x3_t;
- #ifdef __aarch64__
- typedef struct poly64x1x3_t {
- poly64x1_t val[3];
- } poly64x1x3_t;
- typedef struct poly64x2x3_t {
- poly64x2_t val[3];
- } poly64x2x3_t;
- #endif
- typedef struct int8x8x4_t {
- int8x8_t val[4];
- } int8x8x4_t;
- typedef struct int8x16x4_t {
- int8x16_t val[4];
- } int8x16x4_t;
- typedef struct int16x4x4_t {
- int16x4_t val[4];
- } int16x4x4_t;
- typedef struct int16x8x4_t {
- int16x8_t val[4];
- } int16x8x4_t;
- typedef struct int32x2x4_t {
- int32x2_t val[4];
- } int32x2x4_t;
- typedef struct int32x4x4_t {
- int32x4_t val[4];
- } int32x4x4_t;
- typedef struct int64x1x4_t {
- int64x1_t val[4];
- } int64x1x4_t;
- typedef struct int64x2x4_t {
- int64x2_t val[4];
- } int64x2x4_t;
- typedef struct uint8x8x4_t {
- uint8x8_t val[4];
- } uint8x8x4_t;
- typedef struct uint8x16x4_t {
- uint8x16_t val[4];
- } uint8x16x4_t;
- typedef struct uint16x4x4_t {
- uint16x4_t val[4];
- } uint16x4x4_t;
- typedef struct uint16x8x4_t {
- uint16x8_t val[4];
- } uint16x8x4_t;
- typedef struct uint32x2x4_t {
- uint32x2_t val[4];
- } uint32x2x4_t;
- typedef struct uint32x4x4_t {
- uint32x4_t val[4];
- } uint32x4x4_t;
- typedef struct uint64x1x4_t {
- uint64x1_t val[4];
- } uint64x1x4_t;
- typedef struct uint64x2x4_t {
- uint64x2_t val[4];
- } uint64x2x4_t;
- typedef struct float16x4x4_t {
- float16x4_t val[4];
- } float16x4x4_t;
- typedef struct float16x8x4_t {
- float16x8_t val[4];
- } float16x8x4_t;
- typedef struct float32x2x4_t {
- float32x2_t val[4];
- } float32x2x4_t;
- typedef struct float32x4x4_t {
- float32x4_t val[4];
- } float32x4x4_t;
- #ifdef __aarch64__
- typedef struct float64x1x4_t {
- float64x1_t val[4];
- } float64x1x4_t;
- typedef struct float64x2x4_t {
- float64x2_t val[4];
- } float64x2x4_t;
- #endif
- typedef struct poly8x8x4_t {
- poly8x8_t val[4];
- } poly8x8x4_t;
- typedef struct poly8x16x4_t {
- poly8x16_t val[4];
- } poly8x16x4_t;
- typedef struct poly16x4x4_t {
- poly16x4_t val[4];
- } poly16x4x4_t;
- typedef struct poly16x8x4_t {
- poly16x8_t val[4];
- } poly16x8x4_t;
- #ifdef __aarch64__
- typedef struct poly64x1x4_t {
- poly64x1_t val[4];
- } poly64x1x4_t;
- typedef struct poly64x2x4_t {
- poly64x2_t val[4];
- } poly64x2x4_t;
- #endif
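The vector typedefs above correspond to NEON's 64-bit (D) and 128-bit (Q) registers, and the xN structs simply bundle two, three, or four of them for the multi-register load/store intrinsics; each member is reached through .val[i]. A minimal user-side sketch of how they compose (the helper name is illustrative, not part of the header):

#include <arm_neon.h>

/* Illustrative helper (not part of the header): the xN aggregates are plain
   structs, so their members are indexed with .val[i]; each member is an
   ordinary vector type that the intrinsics below accept directly. */
static inline uint8x16_t sum_pair(uint8x16x2_t pair) {
  return vaddq_u8(pair.val[0], pair.val[1]);
}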
- #define __ai static inline __attribute__((__always_inline__, __nodebug__))
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai uint8x16_t __noswap_vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
- return __ret;
- }
- #endif
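Every intrinsic below follows the pattern just shown: the little-endian definition calls the builtin directly, while the big-endian definition reverses lane order with __builtin_shufflevector on the way in and out, and a __noswap_ variant skips the reversal for internal callers that have already normalized lane order. User code looks the same either way; a hedged user-side sketch:

#include <arm_neon.h>

/* Sketch: per-lane absolute difference of two byte vectors. The endian
   handling happens inside vabdq_u8, so this source is identical on
   little- and big-endian builds. */
static inline uint8x16_t abs_diff_u8(uint8x16_t a, uint8x16_t b) {
  return vabdq_u8(a, b);   /* result[i] = |a[i] - b[i]| */
}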
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai uint32x4_t __noswap_vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai uint16x8_t __noswap_vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
- return __ret;
- }
- #else
- __ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai int8x16_t __noswap_vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
- return __ret;
- }
- #else
- __ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai int32x4_t __noswap_vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
- return __ret;
- }
- #else
- __ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai int16x8_t __noswap_vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai uint8x8_t __noswap_vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai uint32x2_t __noswap_vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai uint16x4_t __noswap_vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai int8x8_t __noswap_vabd_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
- return __ret;
- }
- #else
- __ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai int32x2_t __noswap_vabd_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
- return __ret;
- }
- #else
- __ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai int16x4_t __noswap_vabd_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vabsq_s8(int8x16_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 32);
- return __ret;
- }
- #else
- __ai int8x16_t vabsq_s8(int8x16_t __p0) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 32);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vabsq_f32(float32x4_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 41);
- return __ret;
- }
- #else
- __ai float32x4_t vabsq_f32(float32x4_t __p0) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 41);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vabsq_s32(int32x4_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vabsq_s32(int32x4_t __p0) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vabsq_s16(int16x8_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 33);
- return __ret;
- }
- #else
- __ai int16x8_t vabsq_s16(int16x8_t __p0) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 33);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vabs_s8(int8x8_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__p0, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vabs_s8(int8x8_t __p0) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vabs_f32(float32x2_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 9);
- return __ret;
- }
- #else
- __ai float32x2_t vabs_f32(float32x2_t __p0) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 9);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vabs_s32(int32x2_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vabs_s32(int32x2_t __p0) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vabs_s16(int16x4_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__p0, 1);
- return __ret;
- }
- #else
- __ai int16x4_t vabs_s16(int16x4_t __p0) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
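The vabs/vabsq family computes a lane-wise absolute value; like the underlying VABS instruction it wraps rather than saturates, so the most negative element maps to itself. A small user-side sketch:

#include <arm_neon.h>

/* Sketch: absolute value of four signed 32-bit lanes. Note that
   INT32_MIN is returned unchanged (wrapping, not saturating). */
static inline int32x4_t abs_s32(int32x4_t v) {
  return vabsq_s32(v);
}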
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __ret;
- __ret = __p0 + __p1;
- return __ret;
- }
- #else
- __ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = __rev0 + __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __ret;
- __ret = __p0 + __p1;
- return __ret;
- }
- #else
- __ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __rev0 + __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __ret;
- __ret = __p0 + __p1;
- return __ret;
- }
- #else
- __ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = __rev0 + __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __ret;
- __ret = __p0 + __p1;
- return __ret;
- }
- #else
- __ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __rev0 + __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __ret;
- __ret = __p0 + __p1;
- return __ret;
- }
- #else
- __ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = __rev0 + __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __ret;
- __ret = __p0 + __p1;
- return __ret;
- }
- #else
- __ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = __rev0 + __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __ret;
- __ret = __p0 + __p1;
- return __ret;
- }
- #else
- __ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __rev0 + __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __ret;
- __ret = __p0 + __p1;
- return __ret;
- }
- #else
- __ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int64x2_t __ret;
- __ret = __rev0 + __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __ret;
- __ret = __p0 + __p1;
- return __ret;
- }
- #else
- __ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __rev0 + __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = __p0 + __p1;
- return __ret;
- }
- #else
- __ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = __rev0 + __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __ret;
- __ret = __p0 + __p1;
- return __ret;
- }
- #else
- __ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = __rev0 + __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
- uint64x1_t __ret;
- __ret = __p0 + __p1;
- return __ret;
- }
- #else
- __ai uint64x1_t vadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
- uint64x1_t __ret;
- __ret = __p0 + __p1;
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __ret;
- __ret = __p0 + __p1;
- return __ret;
- }
- #else
- __ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = __rev0 + __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __ret;
- __ret = __p0 + __p1;
- return __ret;
- }
- #else
- __ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = __rev0 + __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __ret;
- __ret = __p0 + __p1;
- return __ret;
- }
- #else
- __ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float32x2_t __ret;
- __ret = __rev0 + __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __ret;
- __ret = __p0 + __p1;
- return __ret;
- }
- #else
- __ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __ret;
- __ret = __rev0 + __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vadd_s64(int64x1_t __p0, int64x1_t __p1) {
- int64x1_t __ret;
- __ret = __p0 + __p1;
- return __ret;
- }
- #else
- __ai int64x1_t vadd_s64(int64x1_t __p0, int64x1_t __p1) {
- int64x1_t __ret;
- __ret = __p0 + __p1;
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __ret;
- __ret = __p0 + __p1;
- return __ret;
- }
- #else
- __ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = __rev0 + __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
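As the definitions above show, the vadd family is implemented as the plain C + operator on these vector types, so the intrinsic spelling and the operator spelling compile to the same lane-wise addition. A sketch under that assumption:

#include <arm_neon.h>

/* Sketch: either form produces the same lane-wise floating-point add. */
static inline float32x4_t add_f32(float32x4_t a, float32x4_t b) {
  return a + b;                 /* same as vaddq_f32(a, b) */
}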
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai uint16x4_t __noswap_vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai uint32x2_t __noswap_vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai uint8x8_t __noswap_vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
- return __ret;
- }
- #else
- __ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai int16x4_t __noswap_vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai int32x2_t __noswap_vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai int8x8_t __noswap_vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
- return __ret;
- }
- #endif
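vaddhn ("add, narrowing, high half") adds two wide vectors and keeps only the upper half of each lane, halving the element width; for vaddhn_u16 that is the top byte of each wrapping 16-bit sum. A user-side sketch:

#include <arm_neon.h>

/* Sketch: result[i] = ((a[i] + b[i]) & 0xffff) >> 8 for each of the
   eight lanes, narrowed from uint16 to uint8. */
static inline uint8x8_t high_bytes_of_sum(uint16x8_t a, uint16x8_t b) {
  return vaddhn_u16(a, b);
}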
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __ret;
- __ret = __p0 & __p1;
- return __ret;
- }
- #else
- __ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = __rev0 & __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __ret;
- __ret = __p0 & __p1;
- return __ret;
- }
- #else
- __ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __rev0 & __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __ret;
- __ret = __p0 & __p1;
- return __ret;
- }
- #else
- __ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = __rev0 & __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __ret;
- __ret = __p0 & __p1;
- return __ret;
- }
- #else
- __ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __rev0 & __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __ret;
- __ret = __p0 & __p1;
- return __ret;
- }
- #else
- __ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = __rev0 & __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __ret;
- __ret = __p0 & __p1;
- return __ret;
- }
- #else
- __ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __rev0 & __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __ret;
- __ret = __p0 & __p1;
- return __ret;
- }
- #else
- __ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int64x2_t __ret;
- __ret = __rev0 & __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __ret;
- __ret = __p0 & __p1;
- return __ret;
- }
- #else
- __ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __rev0 & __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = __p0 & __p1;
- return __ret;
- }
- #else
- __ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = __rev0 & __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __ret;
- __ret = __p0 & __p1;
- return __ret;
- }
- #else
- __ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = __rev0 & __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vand_u64(uint64x1_t __p0, uint64x1_t __p1) {
- uint64x1_t __ret;
- __ret = __p0 & __p1;
- return __ret;
- }
- #else
- __ai uint64x1_t vand_u64(uint64x1_t __p0, uint64x1_t __p1) {
- uint64x1_t __ret;
- __ret = __p0 & __p1;
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __ret;
- __ret = __p0 & __p1;
- return __ret;
- }
- #else
- __ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = __rev0 & __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __ret;
- __ret = __p0 & __p1;
- return __ret;
- }
- #else
- __ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = __rev0 & __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __ret;
- __ret = __p0 & __p1;
- return __ret;
- }
- #else
- __ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __ret;
- __ret = __rev0 & __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vand_s64(int64x1_t __p0, int64x1_t __p1) {
- int64x1_t __ret;
- __ret = __p0 & __p1;
- return __ret;
- }
- #else
- __ai int64x1_t vand_s64(int64x1_t __p0, int64x1_t __p1) {
- int64x1_t __ret;
- __ret = __p0 & __p1;
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __ret;
- __ret = __p0 & __p1;
- return __ret;
- }
- #else
- __ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = __rev0 & __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __ret;
- __ret = __p0 & ~__p1;
- return __ret;
- }
- #else
- __ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = __rev0 & ~__rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __ret;
- __ret = __p0 & ~__p1;
- return __ret;
- }
- #else
- __ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __rev0 & ~__rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __ret;
- __ret = __p0 & ~__p1;
- return __ret;
- }
- #else
- __ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = __rev0 & ~__rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __ret;
- __ret = __p0 & ~__p1;
- return __ret;
- }
- #else
- __ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __rev0 & ~__rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __ret;
- __ret = __p0 & ~__p1;
- return __ret;
- }
- #else
- __ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = __rev0 & ~__rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __ret;
- __ret = __p0 & ~__p1;
- return __ret;
- }
- #else
- __ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __rev0 & ~__rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __ret;
- __ret = __p0 & ~__p1;
- return __ret;
- }
- #else
- __ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int64x2_t __ret;
- __ret = __rev0 & ~__rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __ret;
- __ret = __p0 & ~__p1;
- return __ret;
- }
- #else
- __ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __rev0 & ~__rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = __p0 & ~__p1;
- return __ret;
- }
- #else
- __ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = __rev0 & ~__rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __ret;
- __ret = __p0 & ~__p1;
- return __ret;
- }
- #else
- __ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = __rev0 & ~__rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vbic_u64(uint64x1_t __p0, uint64x1_t __p1) {
- uint64x1_t __ret;
- __ret = __p0 & ~__p1;
- return __ret;
- }
- #else
- __ai uint64x1_t vbic_u64(uint64x1_t __p0, uint64x1_t __p1) {
- uint64x1_t __ret;
- __ret = __p0 & ~__p1;
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __ret;
- __ret = __p0 & ~__p1;
- return __ret;
- }
- #else
- __ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = __rev0 & ~__rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __ret;
- __ret = __p0 & ~__p1;
- return __ret;
- }
- #else
- __ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = __rev0 & ~__rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __ret;
- __ret = __p0 & ~__p1;
- return __ret;
- }
- #else
- __ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __ret;
- __ret = __rev0 & ~__rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vbic_s64(int64x1_t __p0, int64x1_t __p1) {
- int64x1_t __ret;
- __ret = __p0 & ~__p1;
- return __ret;
- }
- #else
- __ai int64x1_t vbic_s64(int64x1_t __p0, int64x1_t __p1) {
- int64x1_t __ret;
- __ret = __p0 & ~__p1;
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __ret;
- __ret = __p0 & ~__p1;
- return __ret;
- }
- #else
- __ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = __rev0 & ~__rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) {
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4);
- return __ret;
- }
- #else
- __ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) {
- poly16x4_t __ret;
- __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 5);
- return __ret;
- }
- #else
- __ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- poly16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- poly16x4_t __ret;
- __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 5);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) {
- poly8x16_t __ret;
- __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36);
- return __ret;
- }
- #else
- __ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __ret;
- __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) {
- poly16x8_t __ret;
- __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 37);
- return __ret;
- }
- #else
- __ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- poly16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- poly16x8_t __ret;
- __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 37);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51);
- return __ret;
- }
- #else
- __ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32);
- return __ret;
- }
- #else
- __ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
- return __ret;
- }
- #else
- __ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35);
- return __ret;
- }
- #else
- __ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33);
- return __ret;
- }
- #else
- __ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vbsl_u64(uint64x1_t __p0, uint64x1_t __p1, uint64x1_t __p2) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 19);
- return __ret;
- }
- #else
- __ai uint64x1_t vbsl_u64(uint64x1_t __p0, uint64x1_t __p1, uint64x1_t __p2) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 19);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
- return __ret;
- }
- #else
- __ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vbsl_s64(uint64x1_t __p0, int64x1_t __p1, int64x1_t __p2) {
- int64x1_t __ret;
- __ret = (int64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 3);
- return __ret;
- }
- #else
- __ai int64x1_t vbsl_s64(uint64x1_t __p0, int64x1_t __p1, int64x1_t __p2) {
- int64x1_t __ret;
- __ret = (int64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 3);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1);
- return __ret;
- }
- #else
- __ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0 == __p1);
- return __ret;
- }
- #else
- __ai uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) {
- poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__rev0 == __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0 == __p1);
- return __ret;
- }
- #else
- __ai uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) {
- poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__rev0 == __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0 == __p1);
- return __ret;
- }
- #else
- __ai uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__rev0 == __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0 == __p1);
- return __ret;
- }
- #else
- __ai uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__rev0 == __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0 == __p1);
- return __ret;
- }
- #else
- __ai uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__rev0 == __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0 == __p1);
- return __ret;
- }
- #else
- __ai uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__rev0 == __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0 == __p1);
- return __ret;
- }
- #else
- __ai uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__rev0 == __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0 == __p1);
- return __ret;
- }
- #else
- __ai uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__rev0 == __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0 == __p1);
- return __ret;
- }
- #else
- __ai uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__rev0 == __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0 == __p1);
- return __ret;
- }
- #else
- __ai uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__rev0 == __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0 == __p1);
- return __ret;
- }
- #else
- __ai uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__rev0 == __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0 == __p1);
- return __ret;
- }
- #else
- __ai uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__rev0 == __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0 == __p1);
- return __ret;
- }
- #else
- __ai uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__rev0 == __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0 == __p1);
- return __ret;
- }
- #else
- __ai uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__rev0 == __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0 == __p1);
- return __ret;
- }
- #else
- __ai uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__rev0 == __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0 == __p1);
- return __ret;
- }
- #else
- __ai uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__rev0 == __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0 >= __p1);
- return __ret;
- }
- #else
- __ai uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__rev0 >= __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0 >= __p1);
- return __ret;
- }
- #else
- __ai uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__rev0 >= __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0 >= __p1);
- return __ret;
- }
- #else
- __ai uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__rev0 >= __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0 >= __p1);
- return __ret;
- }
- #else
- __ai uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__rev0 >= __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0 >= __p1);
- return __ret;
- }
- #else
- __ai uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__rev0 >= __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0 >= __p1);
- return __ret;
- }
- #else
- __ai uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__rev0 >= __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0 >= __p1);
- return __ret;
- }
- #else
- __ai uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__rev0 >= __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0 >= __p1);
- return __ret;
- }
- #else
- __ai uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__rev0 >= __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0 >= __p1);
- return __ret;
- }
- #else
- __ai uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__rev0 >= __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0 >= __p1);
- return __ret;
- }
- #else
- __ai uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__rev0 >= __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0 >= __p1);
- return __ret;
- }
- #else
- __ai uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__rev0 >= __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0 >= __p1);
- return __ret;
- }
- #else
- __ai uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__rev0 >= __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0 >= __p1);
- return __ret;
- }
- #else
- __ai uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__rev0 >= __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0 >= __p1);
- return __ret;
- }
- #else
- __ai uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__rev0 >= __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0 > __p1);
- return __ret;
- }
- #else
- __ai uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__rev0 > __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0 > __p1);
- return __ret;
- }
- #else
- __ai uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__rev0 > __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0 > __p1);
- return __ret;
- }
- #else
- __ai uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__rev0 > __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0 > __p1);
- return __ret;
- }
- #else
- __ai uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__rev0 > __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0 > __p1);
- return __ret;
- }
- #else
- __ai uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__rev0 > __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0 > __p1);
- return __ret;
- }
- #else
- __ai uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__rev0 > __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0 > __p1);
- return __ret;
- }
- #else
- __ai uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__rev0 > __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0 > __p1);
- return __ret;
- }
- #else
- __ai uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__rev0 > __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0 > __p1);
- return __ret;
- }
- #else
- __ai uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__rev0 > __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0 > __p1);
- return __ret;
- }
- #else
- __ai uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__rev0 > __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0 > __p1);
- return __ret;
- }
- #else
- __ai uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__rev0 > __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0 > __p1);
- return __ret;
- }
- #else
- __ai uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__rev0 > __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0 > __p1);
- return __ret;
- }
- #else
- __ai uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__rev0 > __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0 > __p1);
- return __ret;
- }
- #else
- __ai uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__rev0 > __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
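/* Editorial usage sketch (not part of the generated header): each vcgt/vcgtq
 * variant above compares lane-wise and returns an unsigned mask vector whose
 * lanes are all-ones ("true") or all-zeros ("false"), ready for bitwise
 * selection. The helper name below is illustrative only and assumes a target
 * where <arm_neon.h> provides these intrinsics. */
static inline uint8x8_t keep_where_greater(uint8x8_t a, uint8x8_t b) {
  uint8x8_t mask = vcgt_u8(a, b); /* lane i = 0xFF if a[i] > b[i], else 0x00 */
  return vand_u8(a, mask);        /* zero out lanes where a[i] <= b[i] */
}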
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0 <= __p1);
- return __ret;
- }
- #else
- __ai uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__rev0 <= __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0 <= __p1);
- return __ret;
- }
- #else
- __ai uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__rev0 <= __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0 <= __p1);
- return __ret;
- }
- #else
- __ai uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__rev0 <= __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0 <= __p1);
- return __ret;
- }
- #else
- __ai uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__rev0 <= __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0 <= __p1);
- return __ret;
- }
- #else
- __ai uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__rev0 <= __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0 <= __p1);
- return __ret;
- }
- #else
- __ai uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__rev0 <= __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0 <= __p1);
- return __ret;
- }
- #else
- __ai uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__rev0 <= __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0 <= __p1);
- return __ret;
- }
- #else
- __ai uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__rev0 <= __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0 <= __p1);
- return __ret;
- }
- #else
- __ai uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__rev0 <= __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0 <= __p1);
- return __ret;
- }
- #else
- __ai uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__rev0 <= __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0 <= __p1);
- return __ret;
- }
- #else
- __ai uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__rev0 <= __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0 <= __p1);
- return __ret;
- }
- #else
- __ai uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__rev0 <= __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0 <= __p1);
- return __ret;
- }
- #else
- __ai uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__rev0 <= __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0 <= __p1);
- return __ret;
- }
- #else
- __ai uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__rev0 <= __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vclsq_s8(int8x16_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 32);
- return __ret;
- }
- #else
- __ai int8x16_t vclsq_s8(int8x16_t __p0) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 32);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vclsq_s32(int32x4_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vclsq_s32(int32x4_t __p0) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vclsq_s16(int16x8_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 33);
- return __ret;
- }
- #else
- __ai int16x8_t vclsq_s16(int16x8_t __p0) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 33);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vcls_s8(int8x8_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__p0, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vcls_s8(int8x8_t __p0) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vcls_s32(int32x2_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__p0, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vcls_s32(int32x2_t __p0) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vcls_s16(int16x4_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__p0, 1);
- return __ret;
- }
- #else
- __ai int16x4_t vcls_s16(int16x4_t __p0) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0 < __p1);
- return __ret;
- }
- #else
- __ai uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__rev0 < __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0 < __p1);
- return __ret;
- }
- #else
- __ai uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__rev0 < __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0 < __p1);
- return __ret;
- }
- #else
- __ai uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__rev0 < __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0 < __p1);
- return __ret;
- }
- #else
- __ai uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__rev0 < __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0 < __p1);
- return __ret;
- }
- #else
- __ai uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__rev0 < __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0 < __p1);
- return __ret;
- }
- #else
- __ai uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__rev0 < __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0 < __p1);
- return __ret;
- }
- #else
- __ai uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__rev0 < __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0 < __p1);
- return __ret;
- }
- #else
- __ai uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__rev0 < __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0 < __p1);
- return __ret;
- }
- #else
- __ai uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__rev0 < __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0 < __p1);
- return __ret;
- }
- #else
- __ai uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__rev0 < __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0 < __p1);
- return __ret;
- }
- #else
- __ai uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__rev0 < __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0 < __p1);
- return __ret;
- }
- #else
- __ai uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__rev0 < __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0 < __p1);
- return __ret;
- }
- #else
- __ai uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__rev0 < __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0 < __p1);
- return __ret;
- }
- #else
- __ai uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__rev0 < __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vclzq_u8(uint8x16_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vclzq_u8(uint8x16_t __p0) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vclzq_u32(uint32x4_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vclzq_u32(uint32x4_t __p0) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vclzq_u16(uint16x8_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vclzq_u16(uint16x8_t __p0) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vclzq_s8(int8x16_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 32);
- return __ret;
- }
- #else
- __ai int8x16_t vclzq_s8(int8x16_t __p0) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 32);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vclzq_s32(int32x4_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vclzq_s32(int32x4_t __p0) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vclzq_s16(int16x8_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 33);
- return __ret;
- }
- #else
- __ai int16x8_t vclzq_s16(int16x8_t __p0) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 33);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vclz_u8(uint8x8_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vclz_u8(uint8x8_t __p0) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vclz_u32(uint32x2_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vclz_u32(uint32x2_t __p0) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vclz_u16(uint16x4_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vclz_u16(uint16x4_t __p0) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vclz_s8(int8x8_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vclz_s8(int8x8_t __p0) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vclz_s32(int32x2_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vclz_s32(int32x2_t __p0) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vclz_s16(int16x4_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 1);
- return __ret;
- }
- #else
- __ai int16x4_t vclz_s16(int16x4_t __p0) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vcnt_p8(poly8x8_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 4);
- return __ret;
- }
- #else
- __ai poly8x8_t vcnt_p8(poly8x8_t __p0) {
- poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 4);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vcntq_p8(poly8x16_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 36);
- return __ret;
- }
- #else
- __ai poly8x16_t vcntq_p8(poly8x16_t __p0) {
- poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __ret;
- __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 36);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vcntq_u8(uint8x16_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vcntq_u8(uint8x16_t __p0) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vcntq_s8(int8x16_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 32);
- return __ret;
- }
- #else
- __ai int8x16_t vcntq_s8(int8x16_t __p0) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 32);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vcnt_u8(uint8x8_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vcnt_u8(uint8x8_t __p0) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vcnt_s8(int8x8_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vcnt_s8(int8x8_t __p0) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
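/* Editorial usage sketch (not part of the generated header): vclz_* counts
 * leading zero bits per lane, vcls_* counts leading sign bits, and vcnt_* is
 * a per-byte population count. A per-16-bit popcount can be derived by
 * summing adjacent byte counts; the helper name below is illustrative. */
static inline uint16x4_t popcount_per_u16(uint16x4_t v) {
  uint8x8_t per_byte = vcnt_u8(vreinterpret_u8_u16(v)); /* popcount of each byte */
  return vpaddl_u8(per_byte);                           /* pairwise add -> per-u16 total */
}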
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) {
- poly8x16_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
- return __ret;
- }
- #else
- __ai poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) {
- poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) {
- poly16x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
- return __ret;
- }
- #else
- __ai poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) {
- poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- poly16x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x16_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
- return __ret;
- }
- #else
- __ai uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai uint8x16_t __noswap_vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x16_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
- return __ret;
- }
- #else
- __ai uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai uint32x4_t __noswap_vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) {
- uint64x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
- return __ret;
- }
- #else
- __ai uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) {
- uint64x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
- return __ret;
- }
- #else
- __ai uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai uint16x8_t __noswap_vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x16_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
- return __ret;
- }
- #else
- __ai int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai int8x16_t __noswap_vcombine_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x16_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
- return __ret;
- }
- #else
- __ai float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float32x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai float32x4_t __noswap_vcombine_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
- return __ret;
- }
- #else
- __ai float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai float16x8_t __noswap_vcombine_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
- return __ret;
- }
- #else
- __ai int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai int32x4_t __noswap_vcombine_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) {
- int64x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
- return __ret;
- }
- #else
- __ai int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) {
- int64x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
- return __ret;
- }
- #else
- __ai int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai int16x8_t __noswap_vcombine_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
- return __ret;
- }
- #endif
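/* Editorial usage sketch (not part of the generated header): vcombine_*
 * concatenates two 64-bit D vectors into one 128-bit Q vector, with the
 * first argument filling the low lanes. Helper name is illustrative. */
static inline uint8x16_t combine_halves(uint8x8_t lo, uint8x8_t hi) {
  return vcombine_u8(lo, hi); /* lanes 0..7 from lo, lanes 8..15 from hi */
}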
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vcreate_p8(uint64_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x8_t vcreate_p8(uint64_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4_t vcreate_p16(uint64_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x4_t vcreate_p16(uint64_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vcreate_u8(uint64_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x8_t vcreate_u8(uint64_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vcreate_u32(uint64_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x2_t vcreate_u32(uint64_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vcreate_u64(uint64_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x1_t vcreate_u64(uint64_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vcreate_u16(uint64_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x4_t vcreate_u16(uint64_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vcreate_s8(uint64_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x8_t vcreate_s8(uint64_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vcreate_f32(uint64_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x2_t vcreate_f32(uint64_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vcreate_f16(uint64_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x4_t vcreate_f16(uint64_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vcreate_s32(uint64_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x2_t vcreate_s32(uint64_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vcreate_s64(uint64_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x1_t vcreate_s64(uint64_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vcreate_s16(uint64_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x4_t vcreate_s16(uint64_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #endif
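/* Editorial usage sketch (not part of the generated header): vcreate_*
 * reinterprets a plain 64-bit scalar as a D vector; as laid out on a
 * little-endian target, bits 7..0 of the scalar become lane 0, bits 15..8
 * lane 1, and so on. Helper name is illustrative. */
static inline uint8x8_t bytes_from_bits(uint64_t bits) {
  return vcreate_u8(bits);
}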
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vcvtq_f32_u32(uint32x4_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 50);
- return __ret;
- }
- #else
- __ai float32x4_t vcvtq_f32_u32(uint32x4_t __p0) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vcvtq_f32_s32(int32x4_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 34);
- return __ret;
- }
- #else
- __ai float32x4_t vcvtq_f32_s32(int32x4_t __p0) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vcvt_f32_u32(uint32x2_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 18);
- return __ret;
- }
- #else
- __ai float32x2_t vcvt_f32_u32(uint32x2_t __p0) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vcvt_f32_s32(int32x2_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 2);
- return __ret;
- }
- #else
- __ai float32x2_t vcvt_f32_s32(int32x2_t __p0) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- float32x4_t __ret; \
- __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 50); \
- __ret; \
- })
- #else
- #define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- float32x4_t __ret; \
- __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 50); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- float32x4_t __ret; \
- __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 34); \
- __ret; \
- })
- #else
- #define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- float32x4_t __ret; \
- __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 34); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- float32x2_t __ret; \
- __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 18); \
- __ret; \
- })
- #else
- #define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- float32x2_t __ret; \
- __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 18); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- float32x2_t __ret; \
- __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 2); \
- __ret; \
- })
- #else
- #define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- float32x2_t __ret; \
- __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 2); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \
- float32x4_t __s0 = __p0; \
- int32x4_t __ret; \
- __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__s0, __p1, 34); \
- __ret; \
- })
- #else
- #define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \
- float32x4_t __s0 = __p0; \
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__rev0, __p1, 34); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \
- float32x2_t __s0 = __p0; \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__s0, __p1, 2); \
- __ret; \
- })
- #else
- #define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \
- float32x2_t __s0 = __p0; \
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__rev0, __p1, 2); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \
- float32x4_t __s0 = __p0; \
- uint32x4_t __ret; \
- __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__s0, __p1, 50); \
- __ret; \
- })
- #else
- #define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \
- float32x4_t __s0 = __p0; \
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint32x4_t __ret; \
- __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__rev0, __p1, 50); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \
- float32x2_t __s0 = __p0; \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__s0, __p1, 18); \
- __ret; \
- })
- #else
- #define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \
- float32x2_t __s0 = __p0; \
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__rev0, __p1, 18); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vcvtq_s32_f32(float32x4_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__p0, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vcvtq_s32_f32(float32x4_t __p0) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__rev0, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vcvt_s32_f32(float32x2_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__p0, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vcvt_s32_f32(float32x2_t __p0) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__rev0, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vcvtq_u32_f32(float32x4_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__p0, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vcvtq_u32_f32(float32x4_t __p0) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__rev0, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__p0, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__rev0, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
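/* Editorial usage sketch (not part of the generated header): the vcvt[q]_n_*
 * macros convert between float and fixed-point lanes, scaling by 2^n for the
 * immediate n (1..32). Treating lanes as Q16.16 below is an illustrative
 * choice, not something the header mandates. */
static inline float32x4_t q16_16_to_float(int32x4_t fixed_q16_16) {
  return vcvtq_n_f32_s32(fixed_q16_16, 16); /* each lane divided by 65536.0f */
}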
- #ifdef __LITTLE_ENDIAN__
- #define vdup_lane_p8(__p0, __p1) __extension__ ({ \
- poly8x8_t __s0 = __p0; \
- poly8x8_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdup_lane_p8(__p0, __p1) __extension__ ({ \
- poly8x8_t __s0 = __p0; \
- poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly8x8_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdup_lane_p16(__p0, __p1) __extension__ ({ \
- poly16x4_t __s0 = __p0; \
- poly16x4_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdup_lane_p16(__p0, __p1) __extension__ ({ \
- poly16x4_t __s0 = __p0; \
- poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- poly16x4_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupq_lane_p8(__p0, __p1) __extension__ ({ \
- poly8x8_t __s0 = __p0; \
- poly8x16_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdupq_lane_p8(__p0, __p1) __extension__ ({ \
- poly8x8_t __s0 = __p0; \
- poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly8x16_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupq_lane_p16(__p0, __p1) __extension__ ({ \
- poly16x4_t __s0 = __p0; \
- poly16x8_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdupq_lane_p16(__p0, __p1) __extension__ ({ \
- poly16x4_t __s0 = __p0; \
- poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- poly16x8_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupq_lane_u8(__p0, __p1) __extension__ ({ \
- uint8x8_t __s0 = __p0; \
- uint8x16_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdupq_lane_u8(__p0, __p1) __extension__ ({ \
- uint8x8_t __s0 = __p0; \
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x16_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupq_lane_u32(__p0, __p1) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x4_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdupq_lane_u32(__p0, __p1) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint32x4_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupq_lane_u64(__p0, __p1) __extension__ ({ \
- uint64x1_t __s0 = __p0; \
- uint64x2_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdupq_lane_u64(__p0, __p1) __extension__ ({ \
- uint64x1_t __s0 = __p0; \
- uint64x2_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupq_lane_u16(__p0, __p1) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x8_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdupq_lane_u16(__p0, __p1) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint16x8_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupq_lane_s8(__p0, __p1) __extension__ ({ \
- int8x8_t __s0 = __p0; \
- int8x16_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdupq_lane_s8(__p0, __p1) __extension__ ({ \
- int8x8_t __s0 = __p0; \
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x16_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupq_lane_f32(__p0, __p1) __extension__ ({ \
- float32x2_t __s0 = __p0; \
- float32x4_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdupq_lane_f32(__p0, __p1) __extension__ ({ \
- float32x2_t __s0 = __p0; \
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- float32x4_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupq_lane_s32(__p0, __p1) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x4_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdupq_lane_s32(__p0, __p1) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x4_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupq_lane_s64(__p0, __p1) __extension__ ({ \
- int64x1_t __s0 = __p0; \
- int64x2_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdupq_lane_s64(__p0, __p1) __extension__ ({ \
- int64x1_t __s0 = __p0; \
- int64x2_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupq_lane_s16(__p0, __p1) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x8_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdupq_lane_s16(__p0, __p1) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x8_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdup_lane_u8(__p0, __p1) __extension__ ({ \
- uint8x8_t __s0 = __p0; \
- uint8x8_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdup_lane_u8(__p0, __p1) __extension__ ({ \
- uint8x8_t __s0 = __p0; \
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x8_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdup_lane_u32(__p0, __p1) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x2_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdup_lane_u32(__p0, __p1) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint32x2_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdup_lane_u64(__p0, __p1) __extension__ ({ \
- uint64x1_t __s0 = __p0; \
- uint64x1_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1); \
- __ret; \
- })
- #else
- #define vdup_lane_u64(__p0, __p1) __extension__ ({ \
- uint64x1_t __s0 = __p0; \
- uint64x1_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdup_lane_u16(__p0, __p1) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x4_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdup_lane_u16(__p0, __p1) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint16x4_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdup_lane_s8(__p0, __p1) __extension__ ({ \
- int8x8_t __s0 = __p0; \
- int8x8_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdup_lane_s8(__p0, __p1) __extension__ ({ \
- int8x8_t __s0 = __p0; \
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x8_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdup_lane_f32(__p0, __p1) __extension__ ({ \
- float32x2_t __s0 = __p0; \
- float32x2_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdup_lane_f32(__p0, __p1) __extension__ ({ \
- float32x2_t __s0 = __p0; \
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- float32x2_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdup_lane_s32(__p0, __p1) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdup_lane_s32(__p0, __p1) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x2_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdup_lane_s64(__p0, __p1) __extension__ ({ \
- int64x1_t __s0 = __p0; \
- int64x1_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1); \
- __ret; \
- })
- #else
- #define vdup_lane_s64(__p0, __p1) __extension__ ({ \
- int64x1_t __s0 = __p0; \
- int64x1_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdup_lane_s16(__p0, __p1) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdup_lane_s16(__p0, __p1) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x4_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
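The definitions above implement the vdup_lane and vdupq_lane splats: every lane of the result is a copy of one lane of the source vector. On big-endian targets the source is first shuffled into architectural lane order, the splat is performed, and the result is shuffled back, so the lane index means the same thing on either endianness. A minimal usage sketch (assuming <arm_neon.h> is included and the target supports NEON; the helper names are illustrative, not part of the header):

#include <arm_neon.h>

/* Broadcast lane 1 of a 2-lane float vector into all four lanes of a quad vector. */
static inline float32x4_t splat_f32_lane1(float32x2_t v) {
  return vdupq_lane_f32(v, 1);
}

/* Broadcast lane 3 of an 8-lane byte vector across a vector of the same shape. */
static inline uint8x8_t splat_u8_lane3(uint8x8_t v) {
  return vdup_lane_u8(v, 3);
}

The lane argument must be a compile-time constant, which is why these intrinsics are defined as macros rather than as the __ai inline functions used elsewhere in the header.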
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vdup_n_p8(poly8_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
- return __ret;
- }
- #else
- __ai poly8x8_t vdup_n_p8(poly8_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4_t vdup_n_p16(poly16_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
- return __ret;
- }
- #else
- __ai poly16x4_t vdup_n_p16(poly16_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vdupq_n_p8(poly8_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
- return __ret;
- }
- #else
- __ai poly8x16_t vdupq_n_p8(poly8_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8_t vdupq_n_p16(poly16_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
- return __ret;
- }
- #else
- __ai poly16x8_t vdupq_n_p16(poly16_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vdupq_n_u8(uint8_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
- return __ret;
- }
- #else
- __ai uint8x16_t vdupq_n_u8(uint8_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vdupq_n_u32(uint32_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
- return __ret;
- }
- #else
- __ai uint32x4_t vdupq_n_u32(uint32_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vdupq_n_u64(uint64_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) {__p0, __p0};
- return __ret;
- }
- #else
- __ai uint64x2_t vdupq_n_u64(uint64_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) {__p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vdupq_n_u16(uint16_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
- return __ret;
- }
- #else
- __ai uint16x8_t vdupq_n_u16(uint16_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vdupq_n_s8(int8_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
- return __ret;
- }
- #else
- __ai int8x16_t vdupq_n_s8(int8_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vdupq_n_f32(float32_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
- return __ret;
- }
- #else
- __ai float32x4_t vdupq_n_f32(float32_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupq_n_f16(__p0) __extension__ ({ \
- float16_t __s0 = __p0; \
- float16x8_t __ret; \
- __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
- __ret; \
- })
- #else
- #define vdupq_n_f16(__p0) __extension__ ({ \
- float16_t __s0 = __p0; \
- float16x8_t __ret; \
- __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vdupq_n_s32(int32_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
- return __ret;
- }
- #else
- __ai int32x4_t vdupq_n_s32(int32_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vdupq_n_s64(int64_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t) {__p0, __p0};
- return __ret;
- }
- #else
- __ai int64x2_t vdupq_n_s64(int64_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t) {__p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vdupq_n_s16(int16_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
- return __ret;
- }
- #else
- __ai int16x8_t vdupq_n_s16(int16_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vdup_n_u8(uint8_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
- return __ret;
- }
- #else
- __ai uint8x8_t vdup_n_u8(uint8_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vdup_n_u32(uint32_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) {__p0, __p0};
- return __ret;
- }
- #else
- __ai uint32x2_t vdup_n_u32(uint32_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) {__p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vdup_n_u64(uint64_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) {__p0};
- return __ret;
- }
- #else
- __ai uint64x1_t vdup_n_u64(uint64_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) {__p0};
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vdup_n_u16(uint16_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
- return __ret;
- }
- #else
- __ai uint16x4_t vdup_n_u16(uint16_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vdup_n_s8(int8_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
- return __ret;
- }
- #else
- __ai int8x8_t vdup_n_s8(int8_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vdup_n_f32(float32_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t) {__p0, __p0};
- return __ret;
- }
- #else
- __ai float32x2_t vdup_n_f32(float32_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t) {__p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdup_n_f16(__p0) __extension__ ({ \
- float16_t __s0 = __p0; \
- float16x4_t __ret; \
- __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
- __ret; \
- })
- #else
- #define vdup_n_f16(__p0) __extension__ ({ \
- float16_t __s0 = __p0; \
- float16x4_t __ret; \
- __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vdup_n_s32(int32_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t) {__p0, __p0};
- return __ret;
- }
- #else
- __ai int32x2_t vdup_n_s32(int32_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t) {__p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vdup_n_s64(int64_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t) {__p0};
- return __ret;
- }
- #else
- __ai int64x1_t vdup_n_s64(int64_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t) {__p0};
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vdup_n_s16(int16_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
- return __ret;
- }
- #else
- __ai int16x4_t vdup_n_s16(int16_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
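The vdup_n and vdupq_n builders above construct a vector with every lane set to the same scalar; the big-endian variants add a trailing shuffle so the in-register lane order matches what the other intrinsics expect. A small sketch (helper names illustrative, assuming <arm_neon.h> and NEON support):

#include <arm_neon.h>

/* Fill vectors from scalars: eight copies of a byte, four copies of a float. */
static inline uint8x8_t   all_bytes(uint8_t b) { return vdup_n_u8(b); }
static inline float32x4_t all_floats(float x)  { return vdupq_n_f32(x); }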
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __ret;
- __ret = __p0 ^ __p1;
- return __ret;
- }
- #else
- __ai uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = __rev0 ^ __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __ret;
- __ret = __p0 ^ __p1;
- return __ret;
- }
- #else
- __ai uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __rev0 ^ __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __ret;
- __ret = __p0 ^ __p1;
- return __ret;
- }
- #else
- __ai uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = __rev0 ^ __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __ret;
- __ret = __p0 ^ __p1;
- return __ret;
- }
- #else
- __ai uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __rev0 ^ __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __ret;
- __ret = __p0 ^ __p1;
- return __ret;
- }
- #else
- __ai int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = __rev0 ^ __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __ret;
- __ret = __p0 ^ __p1;
- return __ret;
- }
- #else
- __ai int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __rev0 ^ __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __ret;
- __ret = __p0 ^ __p1;
- return __ret;
- }
- #else
- __ai int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int64x2_t __ret;
- __ret = __rev0 ^ __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __ret;
- __ret = __p0 ^ __p1;
- return __ret;
- }
- #else
- __ai int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __rev0 ^ __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = __p0 ^ __p1;
- return __ret;
- }
- #else
- __ai uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = __rev0 ^ __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __ret;
- __ret = __p0 ^ __p1;
- return __ret;
- }
- #else
- __ai uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = __rev0 ^ __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t veor_u64(uint64x1_t __p0, uint64x1_t __p1) {
- uint64x1_t __ret;
- __ret = __p0 ^ __p1;
- return __ret;
- }
- #else
- __ai uint64x1_t veor_u64(uint64x1_t __p0, uint64x1_t __p1) {
- uint64x1_t __ret;
- __ret = __p0 ^ __p1;
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __ret;
- __ret = __p0 ^ __p1;
- return __ret;
- }
- #else
- __ai uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = __rev0 ^ __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __ret;
- __ret = __p0 ^ __p1;
- return __ret;
- }
- #else
- __ai int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = __rev0 ^ __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __ret;
- __ret = __p0 ^ __p1;
- return __ret;
- }
- #else
- __ai int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __ret;
- __ret = __rev0 ^ __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t veor_s64(int64x1_t __p0, int64x1_t __p1) {
- int64x1_t __ret;
- __ret = __p0 ^ __p1;
- return __ret;
- }
- #else
- __ai int64x1_t veor_s64(int64x1_t __p0, int64x1_t __p1) {
- int64x1_t __ret;
- __ret = __p0 ^ __p1;
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __ret;
- __ret = __p0 ^ __p1;
- return __ret;
- }
- #else
- __ai int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = __rev0 ^ __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
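The veor and veorq definitions above are plain lane-wise XOR, written as the C ^ operator on the vector types. The big-endian paths still reverse the operands and the result, which is semantically a no-op for a purely lane-wise operation but keeps the generated pattern uniform with the rest of the header. A short sketch (assumptions as before; the helper is illustrative):

#include <arm_neon.h>

/* Bitwise NOT of four 32-bit lanes, expressed as XOR with all-ones. */
static inline uint32x4_t invert_u32x4(uint32x4_t v) {
  return veorq_u32(v, vdupq_n_u32(0xFFFFFFFFu));
}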
- #ifdef __LITTLE_ENDIAN__
- #define vext_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x8_t __s0 = __p0; \
- poly8x8_t __s1 = __p1; \
- poly8x8_t __ret; \
- __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \
- __ret; \
- })
- #else
- #define vext_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x8_t __s0 = __p0; \
- poly8x8_t __s1 = __p1; \
- poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly8x8_t __ret; \
- __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vext_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x4_t __s0 = __p0; \
- poly16x4_t __s1 = __p1; \
- poly16x4_t __ret; \
- __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \
- __ret; \
- })
- #else
- #define vext_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x4_t __s0 = __p0; \
- poly16x4_t __s1 = __p1; \
- poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- poly16x4_t __ret; \
- __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vextq_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x16_t __s0 = __p0; \
- poly8x16_t __s1 = __p1; \
- poly8x16_t __ret; \
- __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \
- __ret; \
- })
- #else
- #define vextq_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x16_t __s0 = __p0; \
- poly8x16_t __s1 = __p1; \
- poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly8x16_t __ret; \
- __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vextq_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x8_t __s0 = __p0; \
- poly16x8_t __s1 = __p1; \
- poly16x8_t __ret; \
- __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \
- __ret; \
- })
- #else
- #define vextq_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x8_t __s0 = __p0; \
- poly16x8_t __s1 = __p1; \
- poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly16x8_t __ret; \
- __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vextq_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x16_t __s0 = __p0; \
- uint8x16_t __s1 = __p1; \
- uint8x16_t __ret; \
- __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
- __ret; \
- })
- #else
- #define vextq_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x16_t __s0 = __p0; \
- uint8x16_t __s1 = __p1; \
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x16_t __ret; \
- __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vextq_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __s1 = __p1; \
- uint32x4_t __ret; \
- __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
- __ret; \
- })
- #else
- #define vextq_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __s1 = __p1; \
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- uint32x4_t __ret; \
- __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vextq_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint64x2_t __s1 = __p1; \
- uint64x2_t __ret; \
- __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
- __ret; \
- })
- #else
- #define vextq_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint64x2_t __s1 = __p1; \
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- uint64x2_t __ret; \
- __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vextq_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __s1 = __p1; \
- uint16x8_t __ret; \
- __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
- __ret; \
- })
- #else
- #define vextq_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __s1 = __p1; \
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __ret; \
- __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vextq_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x16_t __s0 = __p0; \
- int8x16_t __s1 = __p1; \
- int8x16_t __ret; \
- __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
- __ret; \
- })
- #else
- #define vextq_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x16_t __s0 = __p0; \
- int8x16_t __s1 = __p1; \
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x16_t __ret; \
- __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vextq_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x4_t __s0 = __p0; \
- float32x4_t __s1 = __p1; \
- float32x4_t __ret; \
- __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 41); \
- __ret; \
- })
- #else
- #define vextq_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x4_t __s0 = __p0; \
- float32x4_t __s1 = __p1; \
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- float32x4_t __ret; \
- __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 41); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vextq_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x4_t __ret; \
- __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
- __ret; \
- })
- #else
- #define vextq_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vextq_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int64x2_t __s1 = __p1; \
- int64x2_t __ret; \
- __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
- __ret; \
- })
- #else
- #define vextq_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int64x2_t __s1 = __p1; \
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int64x2_t __ret; \
- __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vextq_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x8_t __ret; \
- __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
- __ret; \
- })
- #else
- #define vextq_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __ret; \
- __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vext_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x8_t __s0 = __p0; \
- uint8x8_t __s1 = __p1; \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
- __ret; \
- })
- #else
- #define vext_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x8_t __s0 = __p0; \
- uint8x8_t __s1 = __p1; \
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vext_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x2_t __s1 = __p1; \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
- __ret; \
- })
- #else
- #define vext_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x2_t __s1 = __p1; \
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vext_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x1_t __s0 = __p0; \
- uint64x1_t __s1 = __p1; \
- uint64x1_t __ret; \
- __ret = (uint64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
- __ret; \
- })
- #else
- #define vext_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x1_t __s0 = __p0; \
- uint64x1_t __s1 = __p1; \
- uint64x1_t __ret; \
- __ret = (uint64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vext_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x4_t __s1 = __p1; \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
- __ret; \
- })
- #else
- #define vext_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x4_t __s1 = __p1; \
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vext_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x8_t __s0 = __p0; \
- int8x8_t __s1 = __p1; \
- int8x8_t __ret; \
- __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
- __ret; \
- })
- #else
- #define vext_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x8_t __s0 = __p0; \
- int8x8_t __s1 = __p1; \
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x8_t __ret; \
- __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vext_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x2_t __s0 = __p0; \
- float32x2_t __s1 = __p1; \
- float32x2_t __ret; \
- __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 9); \
- __ret; \
- })
- #else
- #define vext_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x2_t __s0 = __p0; \
- float32x2_t __s1 = __p1; \
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- float32x2_t __ret; \
- __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 9); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vext_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
- __ret; \
- })
- #else
- #define vext_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vext_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x1_t __s0 = __p0; \
- int64x1_t __s1 = __p1; \
- int64x1_t __ret; \
- __ret = (int64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
- __ret; \
- })
- #else
- #define vext_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x1_t __s0 = __p0; \
- int64x1_t __s1 = __p1; \
- int64x1_t __ret; \
- __ret = (int64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vext_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
- __ret; \
- })
- #else
- #define vext_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
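The vext and vextq macros above extract a contiguous window from the concatenation of two vectors: the result starts at lane __p2 of the first operand and continues into the second. The big-endian versions reverse both operands before calling the builtin and reverse the result afterwards. A usage sketch (illustrative helper, assuming <arm_neon.h>):

#include <arm_neon.h>

/* 16-byte window starting 3 bytes into the pair (lo, hi):
 * lanes lo[3..15] followed by hi[0..2]. */
static inline uint8x16_t window_at_3(uint8x16_t lo, uint8x16_t hi) {
  return vextq_u8(lo, hi, 3);
}

This is the usual building block for handling misaligned data that spans two already-loaded vectors.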
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vget_high_p8(poly8x16_t __p0) {
- poly8x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
- return __ret;
- }
- #else
- __ai poly8x8_t vget_high_p8(poly8x16_t __p0) {
- poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai poly8x8_t __noswap_vget_high_p8(poly8x16_t __p0) {
- poly8x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4_t vget_high_p16(poly16x8_t __p0) {
- poly16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
- return __ret;
- }
- #else
- __ai poly16x4_t vget_high_p16(poly16x8_t __p0) {
- poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly16x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vget_high_u8(uint8x16_t __p0) {
- uint8x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
- return __ret;
- }
- #else
- __ai uint8x8_t vget_high_u8(uint8x16_t __p0) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai uint8x8_t __noswap_vget_high_u8(uint8x16_t __p0) {
- uint8x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vget_high_u32(uint32x4_t __p0) {
- uint32x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
- return __ret;
- }
- #else
- __ai uint32x2_t vget_high_u32(uint32x4_t __p0) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai uint32x2_t __noswap_vget_high_u32(uint32x4_t __p0) {
- uint32x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vget_high_u64(uint64x2_t __p0) {
- uint64x1_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 1);
- return __ret;
- }
- #else
- __ai uint64x1_t vget_high_u64(uint64x2_t __p0) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x1_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vget_high_u16(uint16x8_t __p0) {
- uint16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
- return __ret;
- }
- #else
- __ai uint16x4_t vget_high_u16(uint16x8_t __p0) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai uint16x4_t __noswap_vget_high_u16(uint16x8_t __p0) {
- uint16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vget_high_s8(int8x16_t __p0) {
- int8x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
- return __ret;
- }
- #else
- __ai int8x8_t vget_high_s8(int8x16_t __p0) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai int8x8_t __noswap_vget_high_s8(int8x16_t __p0) {
- int8x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vget_high_f32(float32x4_t __p0) {
- float32x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
- return __ret;
- }
- #else
- __ai float32x2_t vget_high_f32(float32x4_t __p0) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai float32x2_t __noswap_vget_high_f32(float32x4_t __p0) {
- float32x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vget_high_f16(float16x8_t __p0) {
- float16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
- return __ret;
- }
- #else
- __ai float16x4_t vget_high_f16(float16x8_t __p0) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai float16x4_t __noswap_vget_high_f16(float16x8_t __p0) {
- float16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vget_high_s32(int32x4_t __p0) {
- int32x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
- return __ret;
- }
- #else
- __ai int32x2_t vget_high_s32(int32x4_t __p0) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai int32x2_t __noswap_vget_high_s32(int32x4_t __p0) {
- int32x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vget_high_s64(int64x2_t __p0) {
- int64x1_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 1);
- return __ret;
- }
- #else
- __ai int64x1_t vget_high_s64(int64x2_t __p0) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x1_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vget_high_s16(int16x8_t __p0) {
- int16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
- return __ret;
- }
- #else
- __ai int16x4_t vget_high_s16(int16x8_t __p0) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai int16x4_t __noswap_vget_high_s16(int16x8_t __p0) {
- int16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
- return __ret;
- }
- #endif
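The vget_high family above returns the upper half of a quad vector as a 64-bit vector. The accompanying __noswap_ variants skip the endian reversal and appear to exist so that other big-endian intrinsic bodies, whose operands are already in reversed lane order, can reuse the same extraction without shuffling twice. A minimal sketch (helper name illustrative):

#include <arm_neon.h>

/* Upper four 16-bit lanes (lanes 4..7) of an eight-lane vector. */
static inline int16x4_t upper_half_s16(int16x8_t v) {
  return vget_high_s16(v);
}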
- #ifdef __LITTLE_ENDIAN__
- #define vget_lane_p8(__p0, __p1) __extension__ ({ \
- poly8x8_t __s0 = __p0; \
- poly8_t __ret; \
- __ret = (poly8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vget_lane_p8(__p0, __p1) __extension__ ({ \
- poly8x8_t __s0 = __p0; \
- poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly8_t __ret; \
- __ret = (poly8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
- __ret; \
- })
- #define __noswap_vget_lane_p8(__p0, __p1) __extension__ ({ \
- poly8x8_t __s0 = __p0; \
- poly8_t __ret; \
- __ret = (poly8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vget_lane_p16(__p0, __p1) __extension__ ({ \
- poly16x4_t __s0 = __p0; \
- poly16_t __ret; \
- __ret = (poly16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vget_lane_p16(__p0, __p1) __extension__ ({ \
- poly16x4_t __s0 = __p0; \
- poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- poly16_t __ret; \
- __ret = (poly16_t) __builtin_neon_vget_lane_i16((int8x8_t)__rev0, __p1); \
- __ret; \
- })
- #define __noswap_vget_lane_p16(__p0, __p1) __extension__ ({ \
- poly16x4_t __s0 = __p0; \
- poly16_t __ret; \
- __ret = (poly16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vgetq_lane_p8(__p0, __p1) __extension__ ({ \
- poly8x16_t __s0 = __p0; \
- poly8_t __ret; \
- __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vgetq_lane_p8(__p0, __p1) __extension__ ({ \
- poly8x16_t __s0 = __p0; \
- poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly8_t __ret; \
- __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \
- __ret; \
- })
- #define __noswap_vgetq_lane_p8(__p0, __p1) __extension__ ({ \
- poly8x16_t __s0 = __p0; \
- poly8_t __ret; \
- __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vgetq_lane_p16(__p0, __p1) __extension__ ({ \
- poly16x8_t __s0 = __p0; \
- poly16_t __ret; \
- __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vgetq_lane_p16(__p0, __p1) __extension__ ({ \
- poly16x8_t __s0 = __p0; \
- poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly16_t __ret; \
- __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__rev0, __p1); \
- __ret; \
- })
- #define __noswap_vgetq_lane_p16(__p0, __p1) __extension__ ({ \
- poly16x8_t __s0 = __p0; \
- poly16_t __ret; \
- __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vgetq_lane_u8(__p0, __p1) __extension__ ({ \
- uint8x16_t __s0 = __p0; \
- uint8_t __ret; \
- __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vgetq_lane_u8(__p0, __p1) __extension__ ({ \
- uint8x16_t __s0 = __p0; \
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8_t __ret; \
- __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \
- __ret; \
- })
- #define __noswap_vgetq_lane_u8(__p0, __p1) __extension__ ({ \
- uint8x16_t __s0 = __p0; \
- uint8_t __ret; \
- __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vgetq_lane_u32(__p0, __p1) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32_t __ret; \
- __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vgetq_lane_u32(__p0, __p1) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint32_t __ret; \
- __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__rev0, __p1); \
- __ret; \
- })
- #define __noswap_vgetq_lane_u32(__p0, __p1) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32_t __ret; \
- __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vgetq_lane_u64(__p0, __p1) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint64_t __ret; \
- __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vgetq_lane_u64(__p0, __p1) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint64_t __ret; \
- __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__rev0, __p1); \
- __ret; \
- })
- #define __noswap_vgetq_lane_u64(__p0, __p1) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint64_t __ret; \
- __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vgetq_lane_u16(__p0, __p1) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16_t __ret; \
- __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vgetq_lane_u16(__p0, __p1) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16_t __ret; \
- __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__rev0, __p1); \
- __ret; \
- })
- #define __noswap_vgetq_lane_u16(__p0, __p1) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16_t __ret; \
- __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vgetq_lane_s8(__p0, __p1) __extension__ ({ \
- int8x16_t __s0 = __p0; \
- int8_t __ret; \
- __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vgetq_lane_s8(__p0, __p1) __extension__ ({ \
- int8x16_t __s0 = __p0; \
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8_t __ret; \
- __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \
- __ret; \
- })
- #define __noswap_vgetq_lane_s8(__p0, __p1) __extension__ ({ \
- int8x16_t __s0 = __p0; \
- int8_t __ret; \
- __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vgetq_lane_f32(__p0, __p1) __extension__ ({ \
- float32x4_t __s0 = __p0; \
- float32_t __ret; \
- __ret = (float32_t) __builtin_neon_vgetq_lane_f32((int8x16_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vgetq_lane_f32(__p0, __p1) __extension__ ({ \
- float32x4_t __s0 = __p0; \
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- float32_t __ret; \
- __ret = (float32_t) __builtin_neon_vgetq_lane_f32((int8x16_t)__rev0, __p1); \
- __ret; \
- })
- #define __noswap_vgetq_lane_f32(__p0, __p1) __extension__ ({ \
- float32x4_t __s0 = __p0; \
- float32_t __ret; \
- __ret = (float32_t) __builtin_neon_vgetq_lane_f32((int8x16_t)__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vgetq_lane_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32_t __ret; \
- __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vgetq_lane_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int32_t __ret; \
- __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__rev0, __p1); \
- __ret; \
- })
- #define __noswap_vgetq_lane_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32_t __ret; \
- __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int8x16_t)__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vgetq_lane_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int64_t __ret; \
- __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vgetq_lane_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int64_t __ret; \
- __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__rev0, __p1); \
- __ret; \
- })
- #define __noswap_vgetq_lane_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int64_t __ret; \
- __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vgetq_lane_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16_t __ret; \
- __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vgetq_lane_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16_t __ret; \
- __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__rev0, __p1); \
- __ret; \
- })
- #define __noswap_vgetq_lane_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16_t __ret; \
- __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int8x16_t)__s0, __p1); \
- __ret; \
- })
- #endif
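The vgetq_lane macros above read a single scalar out of a quad vector; like the other lane-indexed intrinsics, the big-endian forms reverse the vector first so the lane index keeps its architectural meaning, and the lane argument must be a constant expression. A short sketch (illustrative helpers, assuming <arm_neon.h> and NEON support):

#include <arm_neon.h>

/* Sum of the first and last float lanes of a quad vector. */
static inline float ends_sum_f32(float32x4_t v) {
  return vgetq_lane_f32(v, 0) + vgetq_lane_f32(v, 3);
}

/* Third 16-bit lane of a quad vector, widened to plain unsigned. */
static inline unsigned third_lane_u16(uint16x8_t v) {
  return (unsigned)vgetq_lane_u16(v, 2);
}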
- #ifdef __LITTLE_ENDIAN__
- #define vget_lane_u8(__p0, __p1) __extension__ ({ \
- uint8x8_t __s0 = __p0; \
- uint8_t __ret; \
- __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vget_lane_u8(__p0, __p1) __extension__ ({ \
- uint8x8_t __s0 = __p0; \
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8_t __ret; \
- __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
- __ret; \
- })
- #define __noswap_vget_lane_u8(__p0, __p1) __extension__ ({ \
- uint8x8_t __s0 = __p0; \
- uint8_t __ret; \
- __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vget_lane_u32(__p0, __p1) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32_t __ret; \
- __ret = (uint32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vget_lane_u32(__p0, __p1) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint32_t __ret; \
- __ret = (uint32_t) __builtin_neon_vget_lane_i32((int8x8_t)__rev0, __p1); \
- __ret; \
- })
- #define __noswap_vget_lane_u32(__p0, __p1) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32_t __ret; \
- __ret = (uint32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vget_lane_u64(__p0, __p1) __extension__ ({ \
- uint64x1_t __s0 = __p0; \
- uint64_t __ret; \
- __ret = (uint64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vget_lane_u64(__p0, __p1) __extension__ ({ \
- uint64x1_t __s0 = __p0; \
- uint64_t __ret; \
- __ret = (uint64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #define __noswap_vget_lane_u64(__p0, __p1) __extension__ ({ \
- uint64x1_t __s0 = __p0; \
- uint64_t __ret; \
- __ret = (uint64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vget_lane_u16(__p0, __p1) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16_t __ret; \
- __ret = (uint16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vget_lane_u16(__p0, __p1) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint16_t __ret; \
- __ret = (uint16_t) __builtin_neon_vget_lane_i16((int8x8_t)__rev0, __p1); \
- __ret; \
- })
- #define __noswap_vget_lane_u16(__p0, __p1) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16_t __ret; \
- __ret = (uint16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vget_lane_s8(__p0, __p1) __extension__ ({ \
- int8x8_t __s0 = __p0; \
- int8_t __ret; \
- __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vget_lane_s8(__p0, __p1) __extension__ ({ \
- int8x8_t __s0 = __p0; \
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8_t __ret; \
- __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
- __ret; \
- })
- #define __noswap_vget_lane_s8(__p0, __p1) __extension__ ({ \
- int8x8_t __s0 = __p0; \
- int8_t __ret; \
- __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vget_lane_f32(__p0, __p1) __extension__ ({ \
- float32x2_t __s0 = __p0; \
- float32_t __ret; \
- __ret = (float32_t) __builtin_neon_vget_lane_f32((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vget_lane_f32(__p0, __p1) __extension__ ({ \
- float32x2_t __s0 = __p0; \
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- float32_t __ret; \
- __ret = (float32_t) __builtin_neon_vget_lane_f32((int8x8_t)__rev0, __p1); \
- __ret; \
- })
- #define __noswap_vget_lane_f32(__p0, __p1) __extension__ ({ \
- float32x2_t __s0 = __p0; \
- float32_t __ret; \
- __ret = (float32_t) __builtin_neon_vget_lane_f32((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vget_lane_s32(__p0, __p1) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32_t __ret; \
- __ret = (int32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vget_lane_s32(__p0, __p1) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32_t __ret; \
- __ret = (int32_t) __builtin_neon_vget_lane_i32((int8x8_t)__rev0, __p1); \
- __ret; \
- })
- #define __noswap_vget_lane_s32(__p0, __p1) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32_t __ret; \
- __ret = (int32_t) __builtin_neon_vget_lane_i32((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vget_lane_s64(__p0, __p1) __extension__ ({ \
- int64x1_t __s0 = __p0; \
- int64_t __ret; \
- __ret = (int64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vget_lane_s64(__p0, __p1) __extension__ ({ \
- int64x1_t __s0 = __p0; \
- int64_t __ret; \
- __ret = (int64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #define __noswap_vget_lane_s64(__p0, __p1) __extension__ ({ \
- int64x1_t __s0 = __p0; \
- int64_t __ret; \
- __ret = (int64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vget_lane_s16(__p0, __p1) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16_t __ret; \
- __ret = (int16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vget_lane_s16(__p0, __p1) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16_t __ret; \
- __ret = (int16_t) __builtin_neon_vget_lane_i16((int8x8_t)__rev0, __p1); \
- __ret; \
- })
- #define __noswap_vget_lane_s16(__p0, __p1) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16_t __ret; \
- __ret = (int16_t) __builtin_neon_vget_lane_i16((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #endif
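- /* Usage sketch (not part of the generated header): the vget_lane / vgetq_lane
-  * macros above read one element out of a 64-bit or 128-bit vector; the lane
-  * index must be a compile-time constant.  Variable names below are illustrative
-  * only, assuming user code that includes <arm_neon.h>.
-  *
-  *   int32_t buf[4] = {10, 20, 30, 40};
-  *   int32x4_t v = vld1q_s32(buf);          // load four 32-bit lanes
-  *   int32_t third = vgetq_lane_s32(v, 2);  // lane 2 of the loaded data (30),
-  *                                          // same result on either endianness
-  */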
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vget_low_p8(poly8x16_t __p0) {
- poly8x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
- return __ret;
- }
- #else
- __ai poly8x8_t vget_low_p8(poly8x16_t __p0) {
- poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4_t vget_low_p16(poly16x8_t __p0) {
- poly16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
- return __ret;
- }
- #else
- __ai poly16x4_t vget_low_p16(poly16x8_t __p0) {
- poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly16x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vget_low_u8(uint8x16_t __p0) {
- uint8x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
- return __ret;
- }
- #else
- __ai uint8x8_t vget_low_u8(uint8x16_t __p0) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vget_low_u32(uint32x4_t __p0) {
- uint32x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 0, 1);
- return __ret;
- }
- #else
- __ai uint32x2_t vget_low_u32(uint32x4_t __p0) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vget_low_u64(uint64x2_t __p0) {
- uint64x1_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 0);
- return __ret;
- }
- #else
- __ai uint64x1_t vget_low_u64(uint64x2_t __p0) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x1_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vget_low_u16(uint16x8_t __p0) {
- uint16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
- return __ret;
- }
- #else
- __ai uint16x4_t vget_low_u16(uint16x8_t __p0) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vget_low_s8(int8x16_t __p0) {
- int8x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
- return __ret;
- }
- #else
- __ai int8x8_t vget_low_s8(int8x16_t __p0) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vget_low_f32(float32x4_t __p0) {
- float32x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 0, 1);
- return __ret;
- }
- #else
- __ai float32x2_t vget_low_f32(float32x4_t __p0) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vget_low_f16(float16x8_t __p0) {
- float16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
- return __ret;
- }
- #else
- __ai float16x4_t vget_low_f16(float16x8_t __p0) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vget_low_s32(int32x4_t __p0) {
- int32x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 0, 1);
- return __ret;
- }
- #else
- __ai int32x2_t vget_low_s32(int32x4_t __p0) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vget_low_s64(int64x2_t __p0) {
- int64x1_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 0);
- return __ret;
- }
- #else
- __ai int64x1_t vget_low_s64(int64x2_t __p0) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x1_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vget_low_s16(int16x8_t __p0) {
- int16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
- return __ret;
- }
- #else
- __ai int16x4_t vget_low_s16(int16x8_t __p0) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
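- /* Usage sketch (not part of the generated header): vget_low_<type> above returns
-  * the low half (lanes 0..n/2-1) of a 128-bit vector as a 64-bit vector; the
-  * matching vget_high_<type> intrinsics return the upper half.  Names below are
-  * illustrative only.
-  *
-  *   int16_t samples[8] = {1, 2, 3, 4, 5, 6, 7, 8};
-  *   int16x8_t wide = vld1q_s16(samples);   // eight 16-bit lanes
-  *   int16x4_t lo   = vget_low_s16(wide);   // lanes 0..3 -> {1, 2, 3, 4}
-  */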
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
- return __ret;
- }
- #else
- __ai int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
- return __ret;
- }
- #else
- __ai int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
- return __ret;
- }
- #else
- __ai int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
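- /* Usage sketch (not part of the generated header): vhadd / vhaddq above compute a
-  * halving add, (a + b) >> 1 per lane, using a wide enough intermediate that the
-  * sum cannot overflow; the result is truncated (vrhadd is the rounding variant).
-  * Names below are illustrative only.
-  *
-  *   uint8x16_t a = vdupq_n_u8(200), b = vdupq_n_u8(100);
-  *   uint8x16_t avg = vhaddq_u8(a, b);      // every lane is (200 + 100) / 2 = 150
-  */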
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
- return __ret;
- }
- #else
- __ai int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
- return __ret;
- }
- #else
- __ai int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
- return __ret;
- }
- #else
- __ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
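- /* Usage sketch (not part of the generated header): vhsub / vhsubq above compute a
-  * halving subtract, (a - b) >> 1 per lane, likewise without intermediate overflow
-  * and with the signed forms truncating toward negative infinity.  Names below are
-  * illustrative only.
-  *
-  *   int16x4_t a = vdup_n_s16(5), b = vdup_n_s16(2);
-  *   int16x4_t d = vhsub_s16(a, b);         // every lane is (5 - 2) >> 1 = 1
-  */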
- #ifdef __LITTLE_ENDIAN__
- #define vld1_p8(__p0) __extension__ ({ \
- poly8x8_t __ret; \
- __ret = (poly8x8_t) __builtin_neon_vld1_v(__p0, 4); \
- __ret; \
- })
- #else
- #define vld1_p8(__p0) __extension__ ({ \
- poly8x8_t __ret; \
- __ret = (poly8x8_t) __builtin_neon_vld1_v(__p0, 4); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_p16(__p0) __extension__ ({ \
- poly16x4_t __ret; \
- __ret = (poly16x4_t) __builtin_neon_vld1_v(__p0, 5); \
- __ret; \
- })
- #else
- #define vld1_p16(__p0) __extension__ ({ \
- poly16x4_t __ret; \
- __ret = (poly16x4_t) __builtin_neon_vld1_v(__p0, 5); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_p8(__p0) __extension__ ({ \
- poly8x16_t __ret; \
- __ret = (poly8x16_t) __builtin_neon_vld1q_v(__p0, 36); \
- __ret; \
- })
- #else
- #define vld1q_p8(__p0) __extension__ ({ \
- poly8x16_t __ret; \
- __ret = (poly8x16_t) __builtin_neon_vld1q_v(__p0, 36); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_p16(__p0) __extension__ ({ \
- poly16x8_t __ret; \
- __ret = (poly16x8_t) __builtin_neon_vld1q_v(__p0, 37); \
- __ret; \
- })
- #else
- #define vld1q_p16(__p0) __extension__ ({ \
- poly16x8_t __ret; \
- __ret = (poly16x8_t) __builtin_neon_vld1q_v(__p0, 37); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_u8(__p0) __extension__ ({ \
- uint8x16_t __ret; \
- __ret = (uint8x16_t) __builtin_neon_vld1q_v(__p0, 48); \
- __ret; \
- })
- #else
- #define vld1q_u8(__p0) __extension__ ({ \
- uint8x16_t __ret; \
- __ret = (uint8x16_t) __builtin_neon_vld1q_v(__p0, 48); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_u32(__p0) __extension__ ({ \
- uint32x4_t __ret; \
- __ret = (uint32x4_t) __builtin_neon_vld1q_v(__p0, 50); \
- __ret; \
- })
- #else
- #define vld1q_u32(__p0) __extension__ ({ \
- uint32x4_t __ret; \
- __ret = (uint32x4_t) __builtin_neon_vld1q_v(__p0, 50); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_u64(__p0) __extension__ ({ \
- uint64x2_t __ret; \
- __ret = (uint64x2_t) __builtin_neon_vld1q_v(__p0, 51); \
- __ret; \
- })
- #else
- #define vld1q_u64(__p0) __extension__ ({ \
- uint64x2_t __ret; \
- __ret = (uint64x2_t) __builtin_neon_vld1q_v(__p0, 51); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_u16(__p0) __extension__ ({ \
- uint16x8_t __ret; \
- __ret = (uint16x8_t) __builtin_neon_vld1q_v(__p0, 49); \
- __ret; \
- })
- #else
- #define vld1q_u16(__p0) __extension__ ({ \
- uint16x8_t __ret; \
- __ret = (uint16x8_t) __builtin_neon_vld1q_v(__p0, 49); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_s8(__p0) __extension__ ({ \
- int8x16_t __ret; \
- __ret = (int8x16_t) __builtin_neon_vld1q_v(__p0, 32); \
- __ret; \
- })
- #else
- #define vld1q_s8(__p0) __extension__ ({ \
- int8x16_t __ret; \
- __ret = (int8x16_t) __builtin_neon_vld1q_v(__p0, 32); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_f32(__p0) __extension__ ({ \
- float32x4_t __ret; \
- __ret = (float32x4_t) __builtin_neon_vld1q_v(__p0, 41); \
- __ret; \
- })
- #else
- #define vld1q_f32(__p0) __extension__ ({ \
- float32x4_t __ret; \
- __ret = (float32x4_t) __builtin_neon_vld1q_v(__p0, 41); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_f16(__p0) __extension__ ({ \
- float16x8_t __ret; \
- __ret = (float16x8_t) __builtin_neon_vld1q_v(__p0, 40); \
- __ret; \
- })
- #else
- #define vld1q_f16(__p0) __extension__ ({ \
- float16x8_t __ret; \
- __ret = (float16x8_t) __builtin_neon_vld1q_v(__p0, 40); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_s32(__p0) __extension__ ({ \
- int32x4_t __ret; \
- __ret = (int32x4_t) __builtin_neon_vld1q_v(__p0, 34); \
- __ret; \
- })
- #else
- #define vld1q_s32(__p0) __extension__ ({ \
- int32x4_t __ret; \
- __ret = (int32x4_t) __builtin_neon_vld1q_v(__p0, 34); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_s64(__p0) __extension__ ({ \
- int64x2_t __ret; \
- __ret = (int64x2_t) __builtin_neon_vld1q_v(__p0, 35); \
- __ret; \
- })
- #else
- #define vld1q_s64(__p0) __extension__ ({ \
- int64x2_t __ret; \
- __ret = (int64x2_t) __builtin_neon_vld1q_v(__p0, 35); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_s16(__p0) __extension__ ({ \
- int16x8_t __ret; \
- __ret = (int16x8_t) __builtin_neon_vld1q_v(__p0, 33); \
- __ret; \
- })
- #else
- #define vld1q_s16(__p0) __extension__ ({ \
- int16x8_t __ret; \
- __ret = (int16x8_t) __builtin_neon_vld1q_v(__p0, 33); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_u8(__p0) __extension__ ({ \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vld1_v(__p0, 16); \
- __ret; \
- })
- #else
- #define vld1_u8(__p0) __extension__ ({ \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vld1_v(__p0, 16); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_u32(__p0) __extension__ ({ \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vld1_v(__p0, 18); \
- __ret; \
- })
- #else
- #define vld1_u32(__p0) __extension__ ({ \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vld1_v(__p0, 18); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_u64(__p0) __extension__ ({ \
- uint64x1_t __ret; \
- __ret = (uint64x1_t) __builtin_neon_vld1_v(__p0, 19); \
- __ret; \
- })
- #else
- #define vld1_u64(__p0) __extension__ ({ \
- uint64x1_t __ret; \
- __ret = (uint64x1_t) __builtin_neon_vld1_v(__p0, 19); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_u16(__p0) __extension__ ({ \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vld1_v(__p0, 17); \
- __ret; \
- })
- #else
- #define vld1_u16(__p0) __extension__ ({ \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vld1_v(__p0, 17); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_s8(__p0) __extension__ ({ \
- int8x8_t __ret; \
- __ret = (int8x8_t) __builtin_neon_vld1_v(__p0, 0); \
- __ret; \
- })
- #else
- #define vld1_s8(__p0) __extension__ ({ \
- int8x8_t __ret; \
- __ret = (int8x8_t) __builtin_neon_vld1_v(__p0, 0); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_f32(__p0) __extension__ ({ \
- float32x2_t __ret; \
- __ret = (float32x2_t) __builtin_neon_vld1_v(__p0, 9); \
- __ret; \
- })
- #else
- #define vld1_f32(__p0) __extension__ ({ \
- float32x2_t __ret; \
- __ret = (float32x2_t) __builtin_neon_vld1_v(__p0, 9); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_f16(__p0) __extension__ ({ \
- float16x4_t __ret; \
- __ret = (float16x4_t) __builtin_neon_vld1_v(__p0, 8); \
- __ret; \
- })
- #else
- #define vld1_f16(__p0) __extension__ ({ \
- float16x4_t __ret; \
- __ret = (float16x4_t) __builtin_neon_vld1_v(__p0, 8); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_s32(__p0) __extension__ ({ \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vld1_v(__p0, 2); \
- __ret; \
- })
- #else
- #define vld1_s32(__p0) __extension__ ({ \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vld1_v(__p0, 2); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_s64(__p0) __extension__ ({ \
- int64x1_t __ret; \
- __ret = (int64x1_t) __builtin_neon_vld1_v(__p0, 3); \
- __ret; \
- })
- #else
- #define vld1_s64(__p0) __extension__ ({ \
- int64x1_t __ret; \
- __ret = (int64x1_t) __builtin_neon_vld1_v(__p0, 3); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_s16(__p0) __extension__ ({ \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vld1_v(__p0, 1); \
- __ret; \
- })
- #else
- #define vld1_s16(__p0) __extension__ ({ \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vld1_v(__p0, 1); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
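- /* Usage sketch (not part of the generated header): vld1 / vld1q above load a full
-  * 64-bit or 128-bit vector from memory, with lane 0 taken from the lowest address
-  * on either endianness (the big-endian paths only re-shuffle into clang's internal
-  * element order).  Names below are illustrative only.
-  *
-  *   float buf[4] = {1.0f, 2.0f, 3.0f, 4.0f};
-  *   float32x4_t v = vld1q_f32(buf);        // lane 0 = 1.0f, lane 3 = 4.0f
-  */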
- #ifdef __LITTLE_ENDIAN__
- #define vld1_dup_p8(__p0) __extension__ ({ \
- poly8x8_t __ret; \
- __ret = (poly8x8_t) __builtin_neon_vld1_dup_v(__p0, 4); \
- __ret; \
- })
- #else
- #define vld1_dup_p8(__p0) __extension__ ({ \
- poly8x8_t __ret; \
- __ret = (poly8x8_t) __builtin_neon_vld1_dup_v(__p0, 4); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_dup_p16(__p0) __extension__ ({ \
- poly16x4_t __ret; \
- __ret = (poly16x4_t) __builtin_neon_vld1_dup_v(__p0, 5); \
- __ret; \
- })
- #else
- #define vld1_dup_p16(__p0) __extension__ ({ \
- poly16x4_t __ret; \
- __ret = (poly16x4_t) __builtin_neon_vld1_dup_v(__p0, 5); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_dup_p8(__p0) __extension__ ({ \
- poly8x16_t __ret; \
- __ret = (poly8x16_t) __builtin_neon_vld1q_dup_v(__p0, 36); \
- __ret; \
- })
- #else
- #define vld1q_dup_p8(__p0) __extension__ ({ \
- poly8x16_t __ret; \
- __ret = (poly8x16_t) __builtin_neon_vld1q_dup_v(__p0, 36); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_dup_p16(__p0) __extension__ ({ \
- poly16x8_t __ret; \
- __ret = (poly16x8_t) __builtin_neon_vld1q_dup_v(__p0, 37); \
- __ret; \
- })
- #else
- #define vld1q_dup_p16(__p0) __extension__ ({ \
- poly16x8_t __ret; \
- __ret = (poly16x8_t) __builtin_neon_vld1q_dup_v(__p0, 37); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_dup_u8(__p0) __extension__ ({ \
- uint8x16_t __ret; \
- __ret = (uint8x16_t) __builtin_neon_vld1q_dup_v(__p0, 48); \
- __ret; \
- })
- #else
- #define vld1q_dup_u8(__p0) __extension__ ({ \
- uint8x16_t __ret; \
- __ret = (uint8x16_t) __builtin_neon_vld1q_dup_v(__p0, 48); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_dup_u32(__p0) __extension__ ({ \
- uint32x4_t __ret; \
- __ret = (uint32x4_t) __builtin_neon_vld1q_dup_v(__p0, 50); \
- __ret; \
- })
- #else
- #define vld1q_dup_u32(__p0) __extension__ ({ \
- uint32x4_t __ret; \
- __ret = (uint32x4_t) __builtin_neon_vld1q_dup_v(__p0, 50); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_dup_u64(__p0) __extension__ ({ \
- uint64x2_t __ret; \
- __ret = (uint64x2_t) __builtin_neon_vld1q_dup_v(__p0, 51); \
- __ret; \
- })
- #else
- #define vld1q_dup_u64(__p0) __extension__ ({ \
- uint64x2_t __ret; \
- __ret = (uint64x2_t) __builtin_neon_vld1q_dup_v(__p0, 51); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_dup_u16(__p0) __extension__ ({ \
- uint16x8_t __ret; \
- __ret = (uint16x8_t) __builtin_neon_vld1q_dup_v(__p0, 49); \
- __ret; \
- })
- #else
- #define vld1q_dup_u16(__p0) __extension__ ({ \
- uint16x8_t __ret; \
- __ret = (uint16x8_t) __builtin_neon_vld1q_dup_v(__p0, 49); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_dup_s8(__p0) __extension__ ({ \
- int8x16_t __ret; \
- __ret = (int8x16_t) __builtin_neon_vld1q_dup_v(__p0, 32); \
- __ret; \
- })
- #else
- #define vld1q_dup_s8(__p0) __extension__ ({ \
- int8x16_t __ret; \
- __ret = (int8x16_t) __builtin_neon_vld1q_dup_v(__p0, 32); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_dup_f32(__p0) __extension__ ({ \
- float32x4_t __ret; \
- __ret = (float32x4_t) __builtin_neon_vld1q_dup_v(__p0, 41); \
- __ret; \
- })
- #else
- #define vld1q_dup_f32(__p0) __extension__ ({ \
- float32x4_t __ret; \
- __ret = (float32x4_t) __builtin_neon_vld1q_dup_v(__p0, 41); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_dup_f16(__p0) __extension__ ({ \
- float16x8_t __ret; \
- __ret = (float16x8_t) __builtin_neon_vld1q_dup_v(__p0, 40); \
- __ret; \
- })
- #else
- #define vld1q_dup_f16(__p0) __extension__ ({ \
- float16x8_t __ret; \
- __ret = (float16x8_t) __builtin_neon_vld1q_dup_v(__p0, 40); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_dup_s32(__p0) __extension__ ({ \
- int32x4_t __ret; \
- __ret = (int32x4_t) __builtin_neon_vld1q_dup_v(__p0, 34); \
- __ret; \
- })
- #else
- #define vld1q_dup_s32(__p0) __extension__ ({ \
- int32x4_t __ret; \
- __ret = (int32x4_t) __builtin_neon_vld1q_dup_v(__p0, 34); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_dup_s64(__p0) __extension__ ({ \
- int64x2_t __ret; \
- __ret = (int64x2_t) __builtin_neon_vld1q_dup_v(__p0, 35); \
- __ret; \
- })
- #else
- #define vld1q_dup_s64(__p0) __extension__ ({ \
- int64x2_t __ret; \
- __ret = (int64x2_t) __builtin_neon_vld1q_dup_v(__p0, 35); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_dup_s16(__p0) __extension__ ({ \
- int16x8_t __ret; \
- __ret = (int16x8_t) __builtin_neon_vld1q_dup_v(__p0, 33); \
- __ret; \
- })
- #else
- #define vld1q_dup_s16(__p0) __extension__ ({ \
- int16x8_t __ret; \
- __ret = (int16x8_t) __builtin_neon_vld1q_dup_v(__p0, 33); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_dup_u8(__p0) __extension__ ({ \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vld1_dup_v(__p0, 16); \
- __ret; \
- })
- #else
- #define vld1_dup_u8(__p0) __extension__ ({ \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vld1_dup_v(__p0, 16); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_dup_u32(__p0) __extension__ ({ \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vld1_dup_v(__p0, 18); \
- __ret; \
- })
- #else
- #define vld1_dup_u32(__p0) __extension__ ({ \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vld1_dup_v(__p0, 18); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_dup_u64(__p0) __extension__ ({ \
- uint64x1_t __ret; \
- __ret = (uint64x1_t) __builtin_neon_vld1_dup_v(__p0, 19); \
- __ret; \
- })
- #else
- #define vld1_dup_u64(__p0) __extension__ ({ \
- uint64x1_t __ret; \
- __ret = (uint64x1_t) __builtin_neon_vld1_dup_v(__p0, 19); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_dup_u16(__p0) __extension__ ({ \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vld1_dup_v(__p0, 17); \
- __ret; \
- })
- #else
- #define vld1_dup_u16(__p0) __extension__ ({ \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vld1_dup_v(__p0, 17); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_dup_s8(__p0) __extension__ ({ \
- int8x8_t __ret; \
- __ret = (int8x8_t) __builtin_neon_vld1_dup_v(__p0, 0); \
- __ret; \
- })
- #else
- #define vld1_dup_s8(__p0) __extension__ ({ \
- int8x8_t __ret; \
- __ret = (int8x8_t) __builtin_neon_vld1_dup_v(__p0, 0); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_dup_f32(__p0) __extension__ ({ \
- float32x2_t __ret; \
- __ret = (float32x2_t) __builtin_neon_vld1_dup_v(__p0, 9); \
- __ret; \
- })
- #else
- #define vld1_dup_f32(__p0) __extension__ ({ \
- float32x2_t __ret; \
- __ret = (float32x2_t) __builtin_neon_vld1_dup_v(__p0, 9); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_dup_f16(__p0) __extension__ ({ \
- float16x4_t __ret; \
- __ret = (float16x4_t) __builtin_neon_vld1_dup_v(__p0, 8); \
- __ret; \
- })
- #else
- #define vld1_dup_f16(__p0) __extension__ ({ \
- float16x4_t __ret; \
- __ret = (float16x4_t) __builtin_neon_vld1_dup_v(__p0, 8); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_dup_s32(__p0) __extension__ ({ \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vld1_dup_v(__p0, 2); \
- __ret; \
- })
- #else
- #define vld1_dup_s32(__p0) __extension__ ({ \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vld1_dup_v(__p0, 2); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_dup_s64(__p0) __extension__ ({ \
- int64x1_t __ret; \
- __ret = (int64x1_t) __builtin_neon_vld1_dup_v(__p0, 3); \
- __ret; \
- })
- #else
- #define vld1_dup_s64(__p0) __extension__ ({ \
- int64x1_t __ret; \
- __ret = (int64x1_t) __builtin_neon_vld1_dup_v(__p0, 3); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_dup_s16(__p0) __extension__ ({ \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vld1_dup_v(__p0, 1); \
- __ret; \
- })
- #else
- #define vld1_dup_s16(__p0) __extension__ ({ \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vld1_dup_v(__p0, 1); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
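- /* Usage sketch (not part of the generated header): vld1_dup / vld1q_dup above load
-  * a single element and broadcast it to every lane, while the vld1_lane / vld1q_lane
-  * macros that follow overwrite just one lane of an existing vector.  Names below
-  * are illustrative only.
-  *
-  *   const uint16_t k = 7;
-  *   uint16x8_t all_k = vld1q_dup_u16(&k);  // every lane is 7
-  */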
- #ifdef __LITTLE_ENDIAN__
- #define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x8_t __s1 = __p1; \
- poly8x8_t __ret; \
- __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \
- __ret; \
- })
- #else
- #define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x8_t __s1 = __p1; \
- poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly8x8_t __ret; \
- __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x4_t __s1 = __p1; \
- poly16x4_t __ret; \
- __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \
- __ret; \
- })
- #else
- #define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x4_t __s1 = __p1; \
- poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- poly16x4_t __ret; \
- __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x16_t __s1 = __p1; \
- poly8x16_t __ret; \
- __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \
- __ret; \
- })
- #else
- #define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x16_t __s1 = __p1; \
- poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly8x16_t __ret; \
- __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x8_t __s1 = __p1; \
- poly16x8_t __ret; \
- __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \
- __ret; \
- })
- #else
- #define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x8_t __s1 = __p1; \
- poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly16x8_t __ret; \
- __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x16_t __s1 = __p1; \
- uint8x16_t __ret; \
- __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 48); \
- __ret; \
- })
- #else
- #define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x16_t __s1 = __p1; \
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x16_t __ret; \
- __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x4_t __s1 = __p1; \
- uint32x4_t __ret; \
- __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \
- __ret; \
- })
- #else
- #define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x4_t __s1 = __p1; \
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- uint32x4_t __ret; \
- __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x2_t __s1 = __p1; \
- uint64x2_t __ret; \
- __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \
- __ret; \
- })
- #else
- #define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x2_t __s1 = __p1; \
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- uint64x2_t __ret; \
- __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x8_t __s1 = __p1; \
- uint16x8_t __ret; \
- __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \
- __ret; \
- })
- #else
- #define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x8_t __s1 = __p1; \
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __ret; \
- __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x16_t __s1 = __p1; \
- int8x16_t __ret; \
- __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \
- __ret; \
- })
- #else
- #define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x16_t __s1 = __p1; \
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x16_t __ret; \
- __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x4_t __s1 = __p1; \
- float32x4_t __ret; \
- __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \
- __ret; \
- })
- #else
- #define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x4_t __s1 = __p1; \
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- float32x4_t __ret; \
- __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x8_t __s1 = __p1; \
- float16x8_t __ret; \
- __ret = (float16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 40); \
- __ret; \
- })
- #else
- #define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x8_t __s1 = __p1; \
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x8_t __ret; \
- __ret = (float16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 40); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4_t __s1 = __p1; \
- int32x4_t __ret; \
- __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \
- __ret; \
- })
- #else
- #define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4_t __s1 = __p1; \
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x2_t __s1 = __p1; \
- int64x2_t __ret; \
- __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \
- __ret; \
- })
- #else
- #define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x2_t __s1 = __p1; \
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int64x2_t __ret; \
- __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8_t __s1 = __p1; \
- int16x8_t __ret; \
- __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \
- __ret; \
- })
- #else
- #define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8_t __s1 = __p1; \
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __ret; \
- __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x8_t __s1 = __p1; \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \
- __ret; \
- })
- #else
- #define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x8_t __s1 = __p1; \
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x2_t __s1 = __p1; \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \
- __ret; \
- })
- #else
- #define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x2_t __s1 = __p1; \
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x1_t __s1 = __p1; \
- uint64x1_t __ret; \
- __ret = (uint64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \
- __ret; \
- })
- #else
- #define vld1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x1_t __s1 = __p1; \
- uint64x1_t __ret; \
- __ret = (uint64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x4_t __s1 = __p1; \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 17); \
- __ret; \
- })
- #else
- #define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x4_t __s1 = __p1; \
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x8_t __s1 = __p1; \
- int8x8_t __ret; \
- __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \
- __ret; \
- })
- #else
- #define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x8_t __s1 = __p1; \
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x8_t __ret; \
- __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x2_t __s1 = __p1; \
- float32x2_t __ret; \
- __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \
- __ret; \
- })
- #else
- #define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x2_t __s1 = __p1; \
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- float32x2_t __ret; \
- __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x4_t __s1 = __p1; \
- float16x4_t __ret; \
- __ret = (float16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 8); \
- __ret; \
- })
- #else
- #define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x4_t __s1 = __p1; \
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- float16x4_t __ret; \
- __ret = (float16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 8); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2_t __s1 = __p1; \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \
- __ret; \
- })
- #else
- #define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2_t __s1 = __p1; \
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x1_t __s1 = __p1; \
- int64x1_t __ret; \
- __ret = (int64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \
- __ret; \
- })
- #else
- #define vld1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x1_t __s1 = __p1; \
- int64x1_t __ret; \
- __ret = (int64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4_t __s1 = __p1; \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \
- __ret; \
- })
- #else
- #define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4_t __s1 = __p1; \
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
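For context, the vld1[q]_lane_* macros above wrap a single-lane load: the big-endian branch reverses the vector's lanes before and after the builtin call, evidently so that the user-visible lane index behaves the same on either endianness. A minimal usage sketch in plain C, relying only on the intrinsic signatures defined above (the function and buffer names are hypothetical, for illustration only):

#include <arm_neon.h>

/* Sketch: replace lane 2 of an existing int32x4_t with a value loaded
 * from memory; the other three lanes are preserved. The lane index must
 * be a compile-time constant. */
int32x4_t patch_lane2(const int32_t *src, int32x4_t v)
{
    return vld1q_lane_s32(src, v, 2);
}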
- #ifdef __LITTLE_ENDIAN__
- #define vld2_p8(__p0) __extension__ ({ \
- poly8x8x2_t __ret; \
- __builtin_neon_vld2_v(&__ret, __p0, 4); \
- __ret; \
- })
- #else
- #define vld2_p8(__p0) __extension__ ({ \
- poly8x8x2_t __ret; \
- __builtin_neon_vld2_v(&__ret, __p0, 4); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2_p16(__p0) __extension__ ({ \
- poly16x4x2_t __ret; \
- __builtin_neon_vld2_v(&__ret, __p0, 5); \
- __ret; \
- })
- #else
- #define vld2_p16(__p0) __extension__ ({ \
- poly16x4x2_t __ret; \
- __builtin_neon_vld2_v(&__ret, __p0, 5); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_p8(__p0) __extension__ ({ \
- poly8x16x2_t __ret; \
- __builtin_neon_vld2q_v(&__ret, __p0, 36); \
- __ret; \
- })
- #else
- #define vld2q_p8(__p0) __extension__ ({ \
- poly8x16x2_t __ret; \
- __builtin_neon_vld2q_v(&__ret, __p0, 36); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_p16(__p0) __extension__ ({ \
- poly16x8x2_t __ret; \
- __builtin_neon_vld2q_v(&__ret, __p0, 37); \
- __ret; \
- })
- #else
- #define vld2q_p16(__p0) __extension__ ({ \
- poly16x8x2_t __ret; \
- __builtin_neon_vld2q_v(&__ret, __p0, 37); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_u8(__p0) __extension__ ({ \
- uint8x16x2_t __ret; \
- __builtin_neon_vld2q_v(&__ret, __p0, 48); \
- __ret; \
- })
- #else
- #define vld2q_u8(__p0) __extension__ ({ \
- uint8x16x2_t __ret; \
- __builtin_neon_vld2q_v(&__ret, __p0, 48); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_u32(__p0) __extension__ ({ \
- uint32x4x2_t __ret; \
- __builtin_neon_vld2q_v(&__ret, __p0, 50); \
- __ret; \
- })
- #else
- #define vld2q_u32(__p0) __extension__ ({ \
- uint32x4x2_t __ret; \
- __builtin_neon_vld2q_v(&__ret, __p0, 50); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_u16(__p0) __extension__ ({ \
- uint16x8x2_t __ret; \
- __builtin_neon_vld2q_v(&__ret, __p0, 49); \
- __ret; \
- })
- #else
- #define vld2q_u16(__p0) __extension__ ({ \
- uint16x8x2_t __ret; \
- __builtin_neon_vld2q_v(&__ret, __p0, 49); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_s8(__p0) __extension__ ({ \
- int8x16x2_t __ret; \
- __builtin_neon_vld2q_v(&__ret, __p0, 32); \
- __ret; \
- })
- #else
- #define vld2q_s8(__p0) __extension__ ({ \
- int8x16x2_t __ret; \
- __builtin_neon_vld2q_v(&__ret, __p0, 32); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_f32(__p0) __extension__ ({ \
- float32x4x2_t __ret; \
- __builtin_neon_vld2q_v(&__ret, __p0, 41); \
- __ret; \
- })
- #else
- #define vld2q_f32(__p0) __extension__ ({ \
- float32x4x2_t __ret; \
- __builtin_neon_vld2q_v(&__ret, __p0, 41); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_f16(__p0) __extension__ ({ \
- float16x8x2_t __ret; \
- __builtin_neon_vld2q_v(&__ret, __p0, 40); \
- __ret; \
- })
- #else
- #define vld2q_f16(__p0) __extension__ ({ \
- float16x8x2_t __ret; \
- __builtin_neon_vld2q_v(&__ret, __p0, 40); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_s32(__p0) __extension__ ({ \
- int32x4x2_t __ret; \
- __builtin_neon_vld2q_v(&__ret, __p0, 34); \
- __ret; \
- })
- #else
- #define vld2q_s32(__p0) __extension__ ({ \
- int32x4x2_t __ret; \
- __builtin_neon_vld2q_v(&__ret, __p0, 34); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_s16(__p0) __extension__ ({ \
- int16x8x2_t __ret; \
- __builtin_neon_vld2q_v(&__ret, __p0, 33); \
- __ret; \
- })
- #else
- #define vld2q_s16(__p0) __extension__ ({ \
- int16x8x2_t __ret; \
- __builtin_neon_vld2q_v(&__ret, __p0, 33); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2_u8(__p0) __extension__ ({ \
- uint8x8x2_t __ret; \
- __builtin_neon_vld2_v(&__ret, __p0, 16); \
- __ret; \
- })
- #else
- #define vld2_u8(__p0) __extension__ ({ \
- uint8x8x2_t __ret; \
- __builtin_neon_vld2_v(&__ret, __p0, 16); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2_u32(__p0) __extension__ ({ \
- uint32x2x2_t __ret; \
- __builtin_neon_vld2_v(&__ret, __p0, 18); \
- __ret; \
- })
- #else
- #define vld2_u32(__p0) __extension__ ({ \
- uint32x2x2_t __ret; \
- __builtin_neon_vld2_v(&__ret, __p0, 18); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2_u64(__p0) __extension__ ({ \
- uint64x1x2_t __ret; \
- __builtin_neon_vld2_v(&__ret, __p0, 19); \
- __ret; \
- })
- #else
- #define vld2_u64(__p0) __extension__ ({ \
- uint64x1x2_t __ret; \
- __builtin_neon_vld2_v(&__ret, __p0, 19); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2_u16(__p0) __extension__ ({ \
- uint16x4x2_t __ret; \
- __builtin_neon_vld2_v(&__ret, __p0, 17); \
- __ret; \
- })
- #else
- #define vld2_u16(__p0) __extension__ ({ \
- uint16x4x2_t __ret; \
- __builtin_neon_vld2_v(&__ret, __p0, 17); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2_s8(__p0) __extension__ ({ \
- int8x8x2_t __ret; \
- __builtin_neon_vld2_v(&__ret, __p0, 0); \
- __ret; \
- })
- #else
- #define vld2_s8(__p0) __extension__ ({ \
- int8x8x2_t __ret; \
- __builtin_neon_vld2_v(&__ret, __p0, 0); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2_f32(__p0) __extension__ ({ \
- float32x2x2_t __ret; \
- __builtin_neon_vld2_v(&__ret, __p0, 9); \
- __ret; \
- })
- #else
- #define vld2_f32(__p0) __extension__ ({ \
- float32x2x2_t __ret; \
- __builtin_neon_vld2_v(&__ret, __p0, 9); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2_f16(__p0) __extension__ ({ \
- float16x4x2_t __ret; \
- __builtin_neon_vld2_v(&__ret, __p0, 8); \
- __ret; \
- })
- #else
- #define vld2_f16(__p0) __extension__ ({ \
- float16x4x2_t __ret; \
- __builtin_neon_vld2_v(&__ret, __p0, 8); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2_s32(__p0) __extension__ ({ \
- int32x2x2_t __ret; \
- __builtin_neon_vld2_v(&__ret, __p0, 2); \
- __ret; \
- })
- #else
- #define vld2_s32(__p0) __extension__ ({ \
- int32x2x2_t __ret; \
- __builtin_neon_vld2_v(&__ret, __p0, 2); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2_s64(__p0) __extension__ ({ \
- int64x1x2_t __ret; \
- __builtin_neon_vld2_v(&__ret, __p0, 3); \
- __ret; \
- })
- #else
- #define vld2_s64(__p0) __extension__ ({ \
- int64x1x2_t __ret; \
- __builtin_neon_vld2_v(&__ret, __p0, 3); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2_s16(__p0) __extension__ ({ \
- int16x4x2_t __ret; \
- __builtin_neon_vld2_v(&__ret, __p0, 1); \
- __ret; \
- })
- #else
- #define vld2_s16(__p0) __extension__ ({ \
- int16x4x2_t __ret; \
- __builtin_neon_vld2_v(&__ret, __p0, 1); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
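The vld2_* macros above map to the structure-load form of VLD2, which deinterleaves pairs of elements as it loads: val[0] receives the even-indexed elements and val[1] the odd-indexed ones. A short sketch of the typical use, assuming a hypothetical two-channel interleaved byte buffer (vst1_u8 is the companion store intrinsic defined elsewhere in the same header; all names here are illustrative):

#include <arm_neon.h>

/* Sketch: load 16 interleaved bytes (c0,c1,c0,c1,...) and store the two
 * channels to separate 8-byte buffers. */
void split_channels(const uint8_t *interleaved, uint8_t *ch0, uint8_t *ch1)
{
    uint8x8x2_t pair = vld2_u8(interleaved);
    vst1_u8(ch0, pair.val[0]);   /* even-index bytes */
    vst1_u8(ch1, pair.val[1]);   /* odd-index bytes  */
}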
- #ifdef __LITTLE_ENDIAN__
- #define vld2_dup_p8(__p0) __extension__ ({ \
- poly8x8x2_t __ret; \
- __builtin_neon_vld2_dup_v(&__ret, __p0, 4); \
- __ret; \
- })
- #else
- #define vld2_dup_p8(__p0) __extension__ ({ \
- poly8x8x2_t __ret; \
- __builtin_neon_vld2_dup_v(&__ret, __p0, 4); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2_dup_p16(__p0) __extension__ ({ \
- poly16x4x2_t __ret; \
- __builtin_neon_vld2_dup_v(&__ret, __p0, 5); \
- __ret; \
- })
- #else
- #define vld2_dup_p16(__p0) __extension__ ({ \
- poly16x4x2_t __ret; \
- __builtin_neon_vld2_dup_v(&__ret, __p0, 5); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2_dup_u8(__p0) __extension__ ({ \
- uint8x8x2_t __ret; \
- __builtin_neon_vld2_dup_v(&__ret, __p0, 16); \
- __ret; \
- })
- #else
- #define vld2_dup_u8(__p0) __extension__ ({ \
- uint8x8x2_t __ret; \
- __builtin_neon_vld2_dup_v(&__ret, __p0, 16); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2_dup_u32(__p0) __extension__ ({ \
- uint32x2x2_t __ret; \
- __builtin_neon_vld2_dup_v(&__ret, __p0, 18); \
- __ret; \
- })
- #else
- #define vld2_dup_u32(__p0) __extension__ ({ \
- uint32x2x2_t __ret; \
- __builtin_neon_vld2_dup_v(&__ret, __p0, 18); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2_dup_u64(__p0) __extension__ ({ \
- uint64x1x2_t __ret; \
- __builtin_neon_vld2_dup_v(&__ret, __p0, 19); \
- __ret; \
- })
- #else
- #define vld2_dup_u64(__p0) __extension__ ({ \
- uint64x1x2_t __ret; \
- __builtin_neon_vld2_dup_v(&__ret, __p0, 19); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2_dup_u16(__p0) __extension__ ({ \
- uint16x4x2_t __ret; \
- __builtin_neon_vld2_dup_v(&__ret, __p0, 17); \
- __ret; \
- })
- #else
- #define vld2_dup_u16(__p0) __extension__ ({ \
- uint16x4x2_t __ret; \
- __builtin_neon_vld2_dup_v(&__ret, __p0, 17); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2_dup_s8(__p0) __extension__ ({ \
- int8x8x2_t __ret; \
- __builtin_neon_vld2_dup_v(&__ret, __p0, 0); \
- __ret; \
- })
- #else
- #define vld2_dup_s8(__p0) __extension__ ({ \
- int8x8x2_t __ret; \
- __builtin_neon_vld2_dup_v(&__ret, __p0, 0); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2_dup_f32(__p0) __extension__ ({ \
- float32x2x2_t __ret; \
- __builtin_neon_vld2_dup_v(&__ret, __p0, 9); \
- __ret; \
- })
- #else
- #define vld2_dup_f32(__p0) __extension__ ({ \
- float32x2x2_t __ret; \
- __builtin_neon_vld2_dup_v(&__ret, __p0, 9); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2_dup_f16(__p0) __extension__ ({ \
- float16x4x2_t __ret; \
- __builtin_neon_vld2_dup_v(&__ret, __p0, 8); \
- __ret; \
- })
- #else
- #define vld2_dup_f16(__p0) __extension__ ({ \
- float16x4x2_t __ret; \
- __builtin_neon_vld2_dup_v(&__ret, __p0, 8); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2_dup_s32(__p0) __extension__ ({ \
- int32x2x2_t __ret; \
- __builtin_neon_vld2_dup_v(&__ret, __p0, 2); \
- __ret; \
- })
- #else
- #define vld2_dup_s32(__p0) __extension__ ({ \
- int32x2x2_t __ret; \
- __builtin_neon_vld2_dup_v(&__ret, __p0, 2); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2_dup_s64(__p0) __extension__ ({ \
- int64x1x2_t __ret; \
- __builtin_neon_vld2_dup_v(&__ret, __p0, 3); \
- __ret; \
- })
- #else
- #define vld2_dup_s64(__p0) __extension__ ({ \
- int64x1x2_t __ret; \
- __builtin_neon_vld2_dup_v(&__ret, __p0, 3); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2_dup_s16(__p0) __extension__ ({ \
- int16x4x2_t __ret; \
- __builtin_neon_vld2_dup_v(&__ret, __p0, 1); \
- __ret; \
- })
- #else
- #define vld2_dup_s16(__p0) __extension__ ({ \
- int16x4x2_t __ret; \
- __builtin_neon_vld2_dup_v(&__ret, __p0, 1); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
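The vld2_dup_* macros above correspond to the all-lanes form of VLD2: two consecutive elements are read and each is broadcast across every lane of one of the two result vectors. A small sketch under that reading (names hypothetical):

#include <arm_neon.h>

/* Sketch: p[0] is broadcast into both lanes of val[0], and p[1] into
 * both lanes of val[1]. */
uint32x2x2_t broadcast_pair(const uint32_t *p)
{
    return vld2_dup_u32(p);
}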
- #ifdef __LITTLE_ENDIAN__
- #define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x8x2_t __s1 = __p1; \
- poly8x8x2_t __ret; \
- __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \
- __ret; \
- })
- #else
- #define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x8x2_t __s1 = __p1; \
- poly8x8x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- poly8x8x2_t __ret; \
- __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x4x2_t __s1 = __p1; \
- poly16x4x2_t __ret; \
- __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 5); \
- __ret; \
- })
- #else
- #define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x4x2_t __s1 = __p1; \
- poly16x4x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- poly16x4x2_t __ret; \
- __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x8x2_t __s1 = __p1; \
- poly16x8x2_t __ret; \
- __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \
- __ret; \
- })
- #else
- #define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x8x2_t __s1 = __p1; \
- poly16x8x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- poly16x8x2_t __ret; \
- __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x4x2_t __s1 = __p1; \
- uint32x4x2_t __ret; \
- __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \
- __ret; \
- })
- #else
- #define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x4x2_t __s1 = __p1; \
- uint32x4x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- uint32x4x2_t __ret; \
- __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x8x2_t __s1 = __p1; \
- uint16x8x2_t __ret; \
- __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \
- __ret; \
- })
- #else
- #define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x8x2_t __s1 = __p1; \
- uint16x8x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8x2_t __ret; \
- __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x4x2_t __s1 = __p1; \
- float32x4x2_t __ret; \
- __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 41); \
- __ret; \
- })
- #else
- #define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x4x2_t __s1 = __p1; \
- float32x4x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- float32x4x2_t __ret; \
- __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 41); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x8x2_t __s1 = __p1; \
- float16x8x2_t __ret; \
- __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 40); \
- __ret; \
- })
- #else
- #define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x8x2_t __s1 = __p1; \
- float16x8x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x8x2_t __ret; \
- __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 40); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4x2_t __s1 = __p1; \
- int32x4x2_t __ret; \
- __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 34); \
- __ret; \
- })
- #else
- #define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4x2_t __s1 = __p1; \
- int32x4x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- int32x4x2_t __ret; \
- __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 34); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8x2_t __s1 = __p1; \
- int16x8x2_t __ret; \
- __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 33); \
- __ret; \
- })
- #else
- #define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8x2_t __s1 = __p1; \
- int16x8x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8x2_t __ret; \
- __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 33); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x8x2_t __s1 = __p1; \
- uint8x8x2_t __ret; \
- __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \
- __ret; \
- })
- #else
- #define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x8x2_t __s1 = __p1; \
- uint8x8x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x8x2_t __ret; \
- __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x2x2_t __s1 = __p1; \
- uint32x2x2_t __ret; \
- __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 18); \
- __ret; \
- })
- #else
- #define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x2x2_t __s1 = __p1; \
- uint32x2x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- uint32x2x2_t __ret; \
- __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x4x2_t __s1 = __p1; \
- uint16x4x2_t __ret; \
- __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \
- __ret; \
- })
- #else
- #define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x4x2_t __s1 = __p1; \
- uint16x4x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- uint16x4x2_t __ret; \
- __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x8x2_t __s1 = __p1; \
- int8x8x2_t __ret; \
- __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \
- __ret; \
- })
- #else
- #define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x8x2_t __s1 = __p1; \
- int8x8x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x8x2_t __ret; \
- __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x2x2_t __s1 = __p1; \
- float32x2x2_t __ret; \
- __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 9); \
- __ret; \
- })
- #else
- #define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x2x2_t __s1 = __p1; \
- float32x2x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- float32x2x2_t __ret; \
- __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 9); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x4x2_t __s1 = __p1; \
- float16x4x2_t __ret; \
- __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 8); \
- __ret; \
- })
- #else
- #define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x4x2_t __s1 = __p1; \
- float16x4x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- float16x4x2_t __ret; \
- __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 8); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2x2_t __s1 = __p1; \
- int32x2x2_t __ret; \
- __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 2); \
- __ret; \
- })
- #else
- #define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2x2_t __s1 = __p1; \
- int32x2x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- int32x2x2_t __ret; \
- __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 2); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4x2_t __s1 = __p1; \
- int16x4x2_t __ret; \
- __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 1); \
- __ret; \
- })
- #else
- #define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4x2_t __s1 = __p1; \
- int16x4x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- int16x4x2_t __ret; \
- __builtin_neon_vld2_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 1); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
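The vld2[q]_lane_* macros above load one interleaved pair of elements into a chosen lane of an existing two-vector set, leaving the remaining lanes untouched; as with the other lane forms, the big-endian branch reverses lanes on the way in and out. A usage sketch (the accumulator and pointer names are hypothetical):

#include <arm_neon.h>

/* Sketch: read p[0] and p[1] and insert them into lane 1 of acc.val[0]
 * and acc.val[1] respectively; the other lanes pass through unchanged. */
int16x4x2_t patch_pair_lane1(const int16_t *p, int16x4x2_t acc)
{
    return vld2_lane_s16(p, acc, 1);
}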
- #ifdef __LITTLE_ENDIAN__
- #define vld3_p8(__p0) __extension__ ({ \
- poly8x8x3_t __ret; \
- __builtin_neon_vld3_v(&__ret, __p0, 4); \
- __ret; \
- })
- #else
- #define vld3_p8(__p0) __extension__ ({ \
- poly8x8x3_t __ret; \
- __builtin_neon_vld3_v(&__ret, __p0, 4); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3_p16(__p0) __extension__ ({ \
- poly16x4x3_t __ret; \
- __builtin_neon_vld3_v(&__ret, __p0, 5); \
- __ret; \
- })
- #else
- #define vld3_p16(__p0) __extension__ ({ \
- poly16x4x3_t __ret; \
- __builtin_neon_vld3_v(&__ret, __p0, 5); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_p8(__p0) __extension__ ({ \
- poly8x16x3_t __ret; \
- __builtin_neon_vld3q_v(&__ret, __p0, 36); \
- __ret; \
- })
- #else
- #define vld3q_p8(__p0) __extension__ ({ \
- poly8x16x3_t __ret; \
- __builtin_neon_vld3q_v(&__ret, __p0, 36); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_p16(__p0) __extension__ ({ \
- poly16x8x3_t __ret; \
- __builtin_neon_vld3q_v(&__ret, __p0, 37); \
- __ret; \
- })
- #else
- #define vld3q_p16(__p0) __extension__ ({ \
- poly16x8x3_t __ret; \
- __builtin_neon_vld3q_v(&__ret, __p0, 37); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_u8(__p0) __extension__ ({ \
- uint8x16x3_t __ret; \
- __builtin_neon_vld3q_v(&__ret, __p0, 48); \
- __ret; \
- })
- #else
- #define vld3q_u8(__p0) __extension__ ({ \
- uint8x16x3_t __ret; \
- __builtin_neon_vld3q_v(&__ret, __p0, 48); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_u32(__p0) __extension__ ({ \
- uint32x4x3_t __ret; \
- __builtin_neon_vld3q_v(&__ret, __p0, 50); \
- __ret; \
- })
- #else
- #define vld3q_u32(__p0) __extension__ ({ \
- uint32x4x3_t __ret; \
- __builtin_neon_vld3q_v(&__ret, __p0, 50); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_u16(__p0) __extension__ ({ \
- uint16x8x3_t __ret; \
- __builtin_neon_vld3q_v(&__ret, __p0, 49); \
- __ret; \
- })
- #else
- #define vld3q_u16(__p0) __extension__ ({ \
- uint16x8x3_t __ret; \
- __builtin_neon_vld3q_v(&__ret, __p0, 49); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_s8(__p0) __extension__ ({ \
- int8x16x3_t __ret; \
- __builtin_neon_vld3q_v(&__ret, __p0, 32); \
- __ret; \
- })
- #else
- #define vld3q_s8(__p0) __extension__ ({ \
- int8x16x3_t __ret; \
- __builtin_neon_vld3q_v(&__ret, __p0, 32); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_f32(__p0) __extension__ ({ \
- float32x4x3_t __ret; \
- __builtin_neon_vld3q_v(&__ret, __p0, 41); \
- __ret; \
- })
- #else
- #define vld3q_f32(__p0) __extension__ ({ \
- float32x4x3_t __ret; \
- __builtin_neon_vld3q_v(&__ret, __p0, 41); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_f16(__p0) __extension__ ({ \
- float16x8x3_t __ret; \
- __builtin_neon_vld3q_v(&__ret, __p0, 40); \
- __ret; \
- })
- #else
- #define vld3q_f16(__p0) __extension__ ({ \
- float16x8x3_t __ret; \
- __builtin_neon_vld3q_v(&__ret, __p0, 40); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_s32(__p0) __extension__ ({ \
- int32x4x3_t __ret; \
- __builtin_neon_vld3q_v(&__ret, __p0, 34); \
- __ret; \
- })
- #else
- #define vld3q_s32(__p0) __extension__ ({ \
- int32x4x3_t __ret; \
- __builtin_neon_vld3q_v(&__ret, __p0, 34); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_s16(__p0) __extension__ ({ \
- int16x8x3_t __ret; \
- __builtin_neon_vld3q_v(&__ret, __p0, 33); \
- __ret; \
- })
- #else
- #define vld3q_s16(__p0) __extension__ ({ \
- int16x8x3_t __ret; \
- __builtin_neon_vld3q_v(&__ret, __p0, 33); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3_u8(__p0) __extension__ ({ \
- uint8x8x3_t __ret; \
- __builtin_neon_vld3_v(&__ret, __p0, 16); \
- __ret; \
- })
- #else
- #define vld3_u8(__p0) __extension__ ({ \
- uint8x8x3_t __ret; \
- __builtin_neon_vld3_v(&__ret, __p0, 16); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3_u32(__p0) __extension__ ({ \
- uint32x2x3_t __ret; \
- __builtin_neon_vld3_v(&__ret, __p0, 18); \
- __ret; \
- })
- #else
- #define vld3_u32(__p0) __extension__ ({ \
- uint32x2x3_t __ret; \
- __builtin_neon_vld3_v(&__ret, __p0, 18); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3_u64(__p0) __extension__ ({ \
- uint64x1x3_t __ret; \
- __builtin_neon_vld3_v(&__ret, __p0, 19); \
- __ret; \
- })
- #else
- #define vld3_u64(__p0) __extension__ ({ \
- uint64x1x3_t __ret; \
- __builtin_neon_vld3_v(&__ret, __p0, 19); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3_u16(__p0) __extension__ ({ \
- uint16x4x3_t __ret; \
- __builtin_neon_vld3_v(&__ret, __p0, 17); \
- __ret; \
- })
- #else
- #define vld3_u16(__p0) __extension__ ({ \
- uint16x4x3_t __ret; \
- __builtin_neon_vld3_v(&__ret, __p0, 17); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3_s8(__p0) __extension__ ({ \
- int8x8x3_t __ret; \
- __builtin_neon_vld3_v(&__ret, __p0, 0); \
- __ret; \
- })
- #else
- #define vld3_s8(__p0) __extension__ ({ \
- int8x8x3_t __ret; \
- __builtin_neon_vld3_v(&__ret, __p0, 0); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3_f32(__p0) __extension__ ({ \
- float32x2x3_t __ret; \
- __builtin_neon_vld3_v(&__ret, __p0, 9); \
- __ret; \
- })
- #else
- #define vld3_f32(__p0) __extension__ ({ \
- float32x2x3_t __ret; \
- __builtin_neon_vld3_v(&__ret, __p0, 9); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3_f16(__p0) __extension__ ({ \
- float16x4x3_t __ret; \
- __builtin_neon_vld3_v(&__ret, __p0, 8); \
- __ret; \
- })
- #else
- #define vld3_f16(__p0) __extension__ ({ \
- float16x4x3_t __ret; \
- __builtin_neon_vld3_v(&__ret, __p0, 8); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3_s32(__p0) __extension__ ({ \
- int32x2x3_t __ret; \
- __builtin_neon_vld3_v(&__ret, __p0, 2); \
- __ret; \
- })
- #else
- #define vld3_s32(__p0) __extension__ ({ \
- int32x2x3_t __ret; \
- __builtin_neon_vld3_v(&__ret, __p0, 2); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3_s64(__p0) __extension__ ({ \
- int64x1x3_t __ret; \
- __builtin_neon_vld3_v(&__ret, __p0, 3); \
- __ret; \
- })
- #else
- #define vld3_s64(__p0) __extension__ ({ \
- int64x1x3_t __ret; \
- __builtin_neon_vld3_v(&__ret, __p0, 3); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3_s16(__p0) __extension__ ({ \
- int16x4x3_t __ret; \
- __builtin_neon_vld3_v(&__ret, __p0, 1); \
- __ret; \
- })
- #else
- #define vld3_s16(__p0) __extension__ ({ \
- int16x4x3_t __ret; \
- __builtin_neon_vld3_v(&__ret, __p0, 1); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
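The vld3_* macros above follow the same pattern for three-way interleaving (VLD3). A typical sketch is splitting an interleaved three-component stream, e.g. eight RGB pixels; the buffer layout is an assumption made for illustration, and vst1_u8 again comes from elsewhere in the same header:

#include <arm_neon.h>

/* Sketch: load 24 interleaved bytes (r,g,b,r,g,b,...) as three 8-byte
 * vectors, one per component, and store them to separate planes. */
void split_rgb(const uint8_t *rgb, uint8_t *r, uint8_t *g, uint8_t *b)
{
    uint8x8x3_t px = vld3_u8(rgb);
    vst1_u8(r, px.val[0]);
    vst1_u8(g, px.val[1]);
    vst1_u8(b, px.val[2]);
}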
- #ifdef __LITTLE_ENDIAN__
- #define vld3_dup_p8(__p0) __extension__ ({ \
- poly8x8x3_t __ret; \
- __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \
- __ret; \
- })
- #else
- #define vld3_dup_p8(__p0) __extension__ ({ \
- poly8x8x3_t __ret; \
- __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3_dup_p16(__p0) __extension__ ({ \
- poly16x4x3_t __ret; \
- __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \
- __ret; \
- })
- #else
- #define vld3_dup_p16(__p0) __extension__ ({ \
- poly16x4x3_t __ret; \
- __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3_dup_u8(__p0) __extension__ ({ \
- uint8x8x3_t __ret; \
- __builtin_neon_vld3_dup_v(&__ret, __p0, 16); \
- __ret; \
- })
- #else
- #define vld3_dup_u8(__p0) __extension__ ({ \
- uint8x8x3_t __ret; \
- __builtin_neon_vld3_dup_v(&__ret, __p0, 16); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3_dup_u32(__p0) __extension__ ({ \
- uint32x2x3_t __ret; \
- __builtin_neon_vld3_dup_v(&__ret, __p0, 18); \
- __ret; \
- })
- #else
- #define vld3_dup_u32(__p0) __extension__ ({ \
- uint32x2x3_t __ret; \
- __builtin_neon_vld3_dup_v(&__ret, __p0, 18); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3_dup_u64(__p0) __extension__ ({ \
- uint64x1x3_t __ret; \
- __builtin_neon_vld3_dup_v(&__ret, __p0, 19); \
- __ret; \
- })
- #else
- #define vld3_dup_u64(__p0) __extension__ ({ \
- uint64x1x3_t __ret; \
- __builtin_neon_vld3_dup_v(&__ret, __p0, 19); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3_dup_u16(__p0) __extension__ ({ \
- uint16x4x3_t __ret; \
- __builtin_neon_vld3_dup_v(&__ret, __p0, 17); \
- __ret; \
- })
- #else
- #define vld3_dup_u16(__p0) __extension__ ({ \
- uint16x4x3_t __ret; \
- __builtin_neon_vld3_dup_v(&__ret, __p0, 17); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3_dup_s8(__p0) __extension__ ({ \
- int8x8x3_t __ret; \
- __builtin_neon_vld3_dup_v(&__ret, __p0, 0); \
- __ret; \
- })
- #else
- #define vld3_dup_s8(__p0) __extension__ ({ \
- int8x8x3_t __ret; \
- __builtin_neon_vld3_dup_v(&__ret, __p0, 0); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3_dup_f32(__p0) __extension__ ({ \
- float32x2x3_t __ret; \
- __builtin_neon_vld3_dup_v(&__ret, __p0, 9); \
- __ret; \
- })
- #else
- #define vld3_dup_f32(__p0) __extension__ ({ \
- float32x2x3_t __ret; \
- __builtin_neon_vld3_dup_v(&__ret, __p0, 9); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3_dup_f16(__p0) __extension__ ({ \
- float16x4x3_t __ret; \
- __builtin_neon_vld3_dup_v(&__ret, __p0, 8); \
- __ret; \
- })
- #else
- #define vld3_dup_f16(__p0) __extension__ ({ \
- float16x4x3_t __ret; \
- __builtin_neon_vld3_dup_v(&__ret, __p0, 8); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3_dup_s32(__p0) __extension__ ({ \
- int32x2x3_t __ret; \
- __builtin_neon_vld3_dup_v(&__ret, __p0, 2); \
- __ret; \
- })
- #else
- #define vld3_dup_s32(__p0) __extension__ ({ \
- int32x2x3_t __ret; \
- __builtin_neon_vld3_dup_v(&__ret, __p0, 2); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3_dup_s64(__p0) __extension__ ({ \
- int64x1x3_t __ret; \
- __builtin_neon_vld3_dup_v(&__ret, __p0, 3); \
- __ret; \
- })
- #else
- #define vld3_dup_s64(__p0) __extension__ ({ \
- int64x1x3_t __ret; \
- __builtin_neon_vld3_dup_v(&__ret, __p0, 3); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3_dup_s16(__p0) __extension__ ({ \
- int16x4x3_t __ret; \
- __builtin_neon_vld3_dup_v(&__ret, __p0, 1); \
- __ret; \
- })
- #else
- #define vld3_dup_s16(__p0) __extension__ ({ \
- int16x4x3_t __ret; \
- __builtin_neon_vld3_dup_v(&__ret, __p0, 1); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x8x3_t __s1 = __p1; \
- poly8x8x3_t __ret; \
- __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \
- __ret; \
- })
- #else
- #define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x8x3_t __s1 = __p1; \
- poly8x8x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- poly8x8x3_t __ret; \
- __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x4x3_t __s1 = __p1; \
- poly16x4x3_t __ret; \
- __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \
- __ret; \
- })
- #else
- #define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x4x3_t __s1 = __p1; \
- poly16x4x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- poly16x4x3_t __ret; \
- __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x8x3_t __s1 = __p1; \
- poly16x8x3_t __ret; \
- __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \
- __ret; \
- })
- #else
- #define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x8x3_t __s1 = __p1; \
- poly16x8x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- poly16x8x3_t __ret; \
- __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x4x3_t __s1 = __p1; \
- uint32x4x3_t __ret; \
- __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 50); \
- __ret; \
- })
- #else
- #define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x4x3_t __s1 = __p1; \
- uint32x4x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- uint32x4x3_t __ret; \
- __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x8x3_t __s1 = __p1; \
- uint16x8x3_t __ret; \
- __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \
- __ret; \
- })
- #else
- #define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x8x3_t __s1 = __p1; \
- uint16x8x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8x3_t __ret; \
- __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x4x3_t __s1 = __p1; \
- float32x4x3_t __ret; \
- __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 41); \
- __ret; \
- })
- #else
- #define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x4x3_t __s1 = __p1; \
- float32x4x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- float32x4x3_t __ret; \
- __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 41); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x8x3_t __s1 = __p1; \
- float16x8x3_t __ret; \
- __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 40); \
- __ret; \
- })
- #else
- #define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x8x3_t __s1 = __p1; \
- float16x8x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x8x3_t __ret; \
- __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 40); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4x3_t __s1 = __p1; \
- int32x4x3_t __ret; \
- __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 34); \
- __ret; \
- })
- #else
- #define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4x3_t __s1 = __p1; \
- int32x4x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- int32x4x3_t __ret; \
- __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 34); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8x3_t __s1 = __p1; \
- int16x8x3_t __ret; \
- __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 33); \
- __ret; \
- })
- #else
- #define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8x3_t __s1 = __p1; \
- int16x8x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8x3_t __ret; \
- __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 33); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x8x3_t __s1 = __p1; \
- uint8x8x3_t __ret; \
- __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \
- __ret; \
- })
- #else
- #define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x8x3_t __s1 = __p1; \
- uint8x8x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x8x3_t __ret; \
- __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x2x3_t __s1 = __p1; \
- uint32x2x3_t __ret; \
- __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \
- __ret; \
- })
- #else
- #define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x2x3_t __s1 = __p1; \
- uint32x2x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- uint32x2x3_t __ret; \
- __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x4x3_t __s1 = __p1; \
- uint16x4x3_t __ret; \
- __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 17); \
- __ret; \
- })
- #else
- #define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x4x3_t __s1 = __p1; \
- uint16x4x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- uint16x4x3_t __ret; \
- __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x8x3_t __s1 = __p1; \
- int8x8x3_t __ret; \
- __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \
- __ret; \
- })
- #else
- #define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x8x3_t __s1 = __p1; \
- int8x8x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x8x3_t __ret; \
- __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x2x3_t __s1 = __p1; \
- float32x2x3_t __ret; \
- __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 9); \
- __ret; \
- })
- #else
- #define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x2x3_t __s1 = __p1; \
- float32x2x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- float32x2x3_t __ret; \
- __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 9); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x4x3_t __s1 = __p1; \
- float16x4x3_t __ret; \
- __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 8); \
- __ret; \
- })
- #else
- #define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x4x3_t __s1 = __p1; \
- float16x4x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- float16x4x3_t __ret; \
- __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 8); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2x3_t __s1 = __p1; \
- int32x2x3_t __ret; \
- __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 2); \
- __ret; \
- })
- #else
- #define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2x3_t __s1 = __p1; \
- int32x2x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- int32x2x3_t __ret; \
- __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 2); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4x3_t __s1 = __p1; \
- int16x4x3_t __ret; \
- __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 1); \
- __ret; \
- })
- #else
- #define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4x3_t __s1 = __p1; \
- int16x4x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- int16x4x3_t __ret; \
- __builtin_neon_vld3_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 1); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
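The vld3_lane_* wrappers above all follow the same shape: on big-endian targets the source vectors are lane-reversed, the builtin is called, and the results are reversed back; on little-endian targets the builtin is called directly. As a hedged illustration of how a caller uses one of them (not part of the generated header; the helper name and data layout are invented for the example):

#include <arm_neon.h>
#include <stdint.h>

/* Overwrites lane 2 of each of three already de-interleaved int16x4_t planes
 * with the three consecutive int16_t values at `patch`; all other lanes pass
 * through unchanged. The lane index must be an integer constant. */
static int16x4x3_t patch_lane2(int16x4x3_t planes, const int16_t patch[3]) {
  return vld3_lane_s16(patch, planes, 2);
}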
- #ifdef __LITTLE_ENDIAN__
- #define vld4_p8(__p0) __extension__ ({ \
- poly8x8x4_t __ret; \
- __builtin_neon_vld4_v(&__ret, __p0, 4); \
- __ret; \
- })
- #else
- #define vld4_p8(__p0) __extension__ ({ \
- poly8x8x4_t __ret; \
- __builtin_neon_vld4_v(&__ret, __p0, 4); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_p16(__p0) __extension__ ({ \
- poly16x4x4_t __ret; \
- __builtin_neon_vld4_v(&__ret, __p0, 5); \
- __ret; \
- })
- #else
- #define vld4_p16(__p0) __extension__ ({ \
- poly16x4x4_t __ret; \
- __builtin_neon_vld4_v(&__ret, __p0, 5); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_p8(__p0) __extension__ ({ \
- poly8x16x4_t __ret; \
- __builtin_neon_vld4q_v(&__ret, __p0, 36); \
- __ret; \
- })
- #else
- #define vld4q_p8(__p0) __extension__ ({ \
- poly8x16x4_t __ret; \
- __builtin_neon_vld4q_v(&__ret, __p0, 36); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_p16(__p0) __extension__ ({ \
- poly16x8x4_t __ret; \
- __builtin_neon_vld4q_v(&__ret, __p0, 37); \
- __ret; \
- })
- #else
- #define vld4q_p16(__p0) __extension__ ({ \
- poly16x8x4_t __ret; \
- __builtin_neon_vld4q_v(&__ret, __p0, 37); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_u8(__p0) __extension__ ({ \
- uint8x16x4_t __ret; \
- __builtin_neon_vld4q_v(&__ret, __p0, 48); \
- __ret; \
- })
- #else
- #define vld4q_u8(__p0) __extension__ ({ \
- uint8x16x4_t __ret; \
- __builtin_neon_vld4q_v(&__ret, __p0, 48); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_u32(__p0) __extension__ ({ \
- uint32x4x4_t __ret; \
- __builtin_neon_vld4q_v(&__ret, __p0, 50); \
- __ret; \
- })
- #else
- #define vld4q_u32(__p0) __extension__ ({ \
- uint32x4x4_t __ret; \
- __builtin_neon_vld4q_v(&__ret, __p0, 50); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_u16(__p0) __extension__ ({ \
- uint16x8x4_t __ret; \
- __builtin_neon_vld4q_v(&__ret, __p0, 49); \
- __ret; \
- })
- #else
- #define vld4q_u16(__p0) __extension__ ({ \
- uint16x8x4_t __ret; \
- __builtin_neon_vld4q_v(&__ret, __p0, 49); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_s8(__p0) __extension__ ({ \
- int8x16x4_t __ret; \
- __builtin_neon_vld4q_v(&__ret, __p0, 32); \
- __ret; \
- })
- #else
- #define vld4q_s8(__p0) __extension__ ({ \
- int8x16x4_t __ret; \
- __builtin_neon_vld4q_v(&__ret, __p0, 32); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_f32(__p0) __extension__ ({ \
- float32x4x4_t __ret; \
- __builtin_neon_vld4q_v(&__ret, __p0, 41); \
- __ret; \
- })
- #else
- #define vld4q_f32(__p0) __extension__ ({ \
- float32x4x4_t __ret; \
- __builtin_neon_vld4q_v(&__ret, __p0, 41); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_f16(__p0) __extension__ ({ \
- float16x8x4_t __ret; \
- __builtin_neon_vld4q_v(&__ret, __p0, 40); \
- __ret; \
- })
- #else
- #define vld4q_f16(__p0) __extension__ ({ \
- float16x8x4_t __ret; \
- __builtin_neon_vld4q_v(&__ret, __p0, 40); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_s32(__p0) __extension__ ({ \
- int32x4x4_t __ret; \
- __builtin_neon_vld4q_v(&__ret, __p0, 34); \
- __ret; \
- })
- #else
- #define vld4q_s32(__p0) __extension__ ({ \
- int32x4x4_t __ret; \
- __builtin_neon_vld4q_v(&__ret, __p0, 34); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_s16(__p0) __extension__ ({ \
- int16x8x4_t __ret; \
- __builtin_neon_vld4q_v(&__ret, __p0, 33); \
- __ret; \
- })
- #else
- #define vld4q_s16(__p0) __extension__ ({ \
- int16x8x4_t __ret; \
- __builtin_neon_vld4q_v(&__ret, __p0, 33); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_u8(__p0) __extension__ ({ \
- uint8x8x4_t __ret; \
- __builtin_neon_vld4_v(&__ret, __p0, 16); \
- __ret; \
- })
- #else
- #define vld4_u8(__p0) __extension__ ({ \
- uint8x8x4_t __ret; \
- __builtin_neon_vld4_v(&__ret, __p0, 16); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_u32(__p0) __extension__ ({ \
- uint32x2x4_t __ret; \
- __builtin_neon_vld4_v(&__ret, __p0, 18); \
- __ret; \
- })
- #else
- #define vld4_u32(__p0) __extension__ ({ \
- uint32x2x4_t __ret; \
- __builtin_neon_vld4_v(&__ret, __p0, 18); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_u64(__p0) __extension__ ({ \
- uint64x1x4_t __ret; \
- __builtin_neon_vld4_v(&__ret, __p0, 19); \
- __ret; \
- })
- #else
- #define vld4_u64(__p0) __extension__ ({ \
- uint64x1x4_t __ret; \
- __builtin_neon_vld4_v(&__ret, __p0, 19); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_u16(__p0) __extension__ ({ \
- uint16x4x4_t __ret; \
- __builtin_neon_vld4_v(&__ret, __p0, 17); \
- __ret; \
- })
- #else
- #define vld4_u16(__p0) __extension__ ({ \
- uint16x4x4_t __ret; \
- __builtin_neon_vld4_v(&__ret, __p0, 17); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_s8(__p0) __extension__ ({ \
- int8x8x4_t __ret; \
- __builtin_neon_vld4_v(&__ret, __p0, 0); \
- __ret; \
- })
- #else
- #define vld4_s8(__p0) __extension__ ({ \
- int8x8x4_t __ret; \
- __builtin_neon_vld4_v(&__ret, __p0, 0); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_f32(__p0) __extension__ ({ \
- float32x2x4_t __ret; \
- __builtin_neon_vld4_v(&__ret, __p0, 9); \
- __ret; \
- })
- #else
- #define vld4_f32(__p0) __extension__ ({ \
- float32x2x4_t __ret; \
- __builtin_neon_vld4_v(&__ret, __p0, 9); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_f16(__p0) __extension__ ({ \
- float16x4x4_t __ret; \
- __builtin_neon_vld4_v(&__ret, __p0, 8); \
- __ret; \
- })
- #else
- #define vld4_f16(__p0) __extension__ ({ \
- float16x4x4_t __ret; \
- __builtin_neon_vld4_v(&__ret, __p0, 8); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_s32(__p0) __extension__ ({ \
- int32x2x4_t __ret; \
- __builtin_neon_vld4_v(&__ret, __p0, 2); \
- __ret; \
- })
- #else
- #define vld4_s32(__p0) __extension__ ({ \
- int32x2x4_t __ret; \
- __builtin_neon_vld4_v(&__ret, __p0, 2); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_s64(__p0) __extension__ ({ \
- int64x1x4_t __ret; \
- __builtin_neon_vld4_v(&__ret, __p0, 3); \
- __ret; \
- })
- #else
- #define vld4_s64(__p0) __extension__ ({ \
- int64x1x4_t __ret; \
- __builtin_neon_vld4_v(&__ret, __p0, 3); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_s16(__p0) __extension__ ({ \
- int16x4x4_t __ret; \
- __builtin_neon_vld4_v(&__ret, __p0, 1); \
- __ret; \
- })
- #else
- #define vld4_s16(__p0) __extension__ ({ \
- int16x4x4_t __ret; \
- __builtin_neon_vld4_v(&__ret, __p0, 1); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_dup_p8(__p0) __extension__ ({ \
- poly8x8x4_t __ret; \
- __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \
- __ret; \
- })
- #else
- #define vld4_dup_p8(__p0) __extension__ ({ \
- poly8x8x4_t __ret; \
- __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_dup_p16(__p0) __extension__ ({ \
- poly16x4x4_t __ret; \
- __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \
- __ret; \
- })
- #else
- #define vld4_dup_p16(__p0) __extension__ ({ \
- poly16x4x4_t __ret; \
- __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_dup_u8(__p0) __extension__ ({ \
- uint8x8x4_t __ret; \
- __builtin_neon_vld4_dup_v(&__ret, __p0, 16); \
- __ret; \
- })
- #else
- #define vld4_dup_u8(__p0) __extension__ ({ \
- uint8x8x4_t __ret; \
- __builtin_neon_vld4_dup_v(&__ret, __p0, 16); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_dup_u32(__p0) __extension__ ({ \
- uint32x2x4_t __ret; \
- __builtin_neon_vld4_dup_v(&__ret, __p0, 18); \
- __ret; \
- })
- #else
- #define vld4_dup_u32(__p0) __extension__ ({ \
- uint32x2x4_t __ret; \
- __builtin_neon_vld4_dup_v(&__ret, __p0, 18); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_dup_u64(__p0) __extension__ ({ \
- uint64x1x4_t __ret; \
- __builtin_neon_vld4_dup_v(&__ret, __p0, 19); \
- __ret; \
- })
- #else
- #define vld4_dup_u64(__p0) __extension__ ({ \
- uint64x1x4_t __ret; \
- __builtin_neon_vld4_dup_v(&__ret, __p0, 19); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_dup_u16(__p0) __extension__ ({ \
- uint16x4x4_t __ret; \
- __builtin_neon_vld4_dup_v(&__ret, __p0, 17); \
- __ret; \
- })
- #else
- #define vld4_dup_u16(__p0) __extension__ ({ \
- uint16x4x4_t __ret; \
- __builtin_neon_vld4_dup_v(&__ret, __p0, 17); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_dup_s8(__p0) __extension__ ({ \
- int8x8x4_t __ret; \
- __builtin_neon_vld4_dup_v(&__ret, __p0, 0); \
- __ret; \
- })
- #else
- #define vld4_dup_s8(__p0) __extension__ ({ \
- int8x8x4_t __ret; \
- __builtin_neon_vld4_dup_v(&__ret, __p0, 0); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_dup_f32(__p0) __extension__ ({ \
- float32x2x4_t __ret; \
- __builtin_neon_vld4_dup_v(&__ret, __p0, 9); \
- __ret; \
- })
- #else
- #define vld4_dup_f32(__p0) __extension__ ({ \
- float32x2x4_t __ret; \
- __builtin_neon_vld4_dup_v(&__ret, __p0, 9); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_dup_f16(__p0) __extension__ ({ \
- float16x4x4_t __ret; \
- __builtin_neon_vld4_dup_v(&__ret, __p0, 8); \
- __ret; \
- })
- #else
- #define vld4_dup_f16(__p0) __extension__ ({ \
- float16x4x4_t __ret; \
- __builtin_neon_vld4_dup_v(&__ret, __p0, 8); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_dup_s32(__p0) __extension__ ({ \
- int32x2x4_t __ret; \
- __builtin_neon_vld4_dup_v(&__ret, __p0, 2); \
- __ret; \
- })
- #else
- #define vld4_dup_s32(__p0) __extension__ ({ \
- int32x2x4_t __ret; \
- __builtin_neon_vld4_dup_v(&__ret, __p0, 2); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_dup_s64(__p0) __extension__ ({ \
- int64x1x4_t __ret; \
- __builtin_neon_vld4_dup_v(&__ret, __p0, 3); \
- __ret; \
- })
- #else
- #define vld4_dup_s64(__p0) __extension__ ({ \
- int64x1x4_t __ret; \
- __builtin_neon_vld4_dup_v(&__ret, __p0, 3); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_dup_s16(__p0) __extension__ ({ \
- int16x4x4_t __ret; \
- __builtin_neon_vld4_dup_v(&__ret, __p0, 1); \
- __ret; \
- })
- #else
- #define vld4_dup_s16(__p0) __extension__ ({ \
- int16x4x4_t __ret; \
- __builtin_neon_vld4_dup_v(&__ret, __p0, 1); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x8x4_t __s1 = __p1; \
- poly8x8x4_t __ret; \
- __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \
- __ret; \
- })
- #else
- #define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x8x4_t __s1 = __p1; \
- poly8x8x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- poly8x8x4_t __ret; \
- __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x4x4_t __s1 = __p1; \
- poly16x4x4_t __ret; \
- __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \
- __ret; \
- })
- #else
- #define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x4x4_t __s1 = __p1; \
- poly16x4x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
- poly16x4x4_t __ret; \
- __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x8x4_t __s1 = __p1; \
- poly16x8x4_t __ret; \
- __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 37); \
- __ret; \
- })
- #else
- #define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x8x4_t __s1 = __p1; \
- poly16x8x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- poly16x8x4_t __ret; \
- __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x4x4_t __s1 = __p1; \
- uint32x4x4_t __ret; \
- __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 50); \
- __ret; \
- })
- #else
- #define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x4x4_t __s1 = __p1; \
- uint32x4x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
- uint32x4x4_t __ret; \
- __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x8x4_t __s1 = __p1; \
- uint16x8x4_t __ret; \
- __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \
- __ret; \
- })
- #else
- #define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x8x4_t __s1 = __p1; \
- uint16x8x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8x4_t __ret; \
- __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x4x4_t __s1 = __p1; \
- float32x4x4_t __ret; \
- __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 41); \
- __ret; \
- })
- #else
- #define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x4x4_t __s1 = __p1; \
- float32x4x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
- float32x4x4_t __ret; \
- __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 41); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x8x4_t __s1 = __p1; \
- float16x8x4_t __ret; \
- __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 40); \
- __ret; \
- })
- #else
- #define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x8x4_t __s1 = __p1; \
- float16x8x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x8x4_t __ret; \
- __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 40); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4x4_t __s1 = __p1; \
- int32x4x4_t __ret; \
- __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 34); \
- __ret; \
- })
- #else
- #define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4x4_t __s1 = __p1; \
- int32x4x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
- int32x4x4_t __ret; \
- __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 34); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8x4_t __s1 = __p1; \
- int16x8x4_t __ret; \
- __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 33); \
- __ret; \
- })
- #else
- #define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8x4_t __s1 = __p1; \
- int16x8x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8x4_t __ret; \
- __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 33); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x8x4_t __s1 = __p1; \
- uint8x8x4_t __ret; \
- __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 16); \
- __ret; \
- })
- #else
- #define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x8x4_t __s1 = __p1; \
- uint8x8x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x8x4_t __ret; \
- __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x2x4_t __s1 = __p1; \
- uint32x2x4_t __ret; \
- __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 18); \
- __ret; \
- })
- #else
- #define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x2x4_t __s1 = __p1; \
- uint32x2x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
- uint32x2x4_t __ret; \
- __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x4x4_t __s1 = __p1; \
- uint16x4x4_t __ret; \
- __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 17); \
- __ret; \
- })
- #else
- #define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x4x4_t __s1 = __p1; \
- uint16x4x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
- uint16x4x4_t __ret; \
- __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x8x4_t __s1 = __p1; \
- int8x8x4_t __ret; \
- __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \
- __ret; \
- })
- #else
- #define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x8x4_t __s1 = __p1; \
- int8x8x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x8x4_t __ret; \
- __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x2x4_t __s1 = __p1; \
- float32x2x4_t __ret; \
- __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 9); \
- __ret; \
- })
- #else
- #define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x2x4_t __s1 = __p1; \
- float32x2x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
- float32x2x4_t __ret; \
- __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 9); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x4x4_t __s1 = __p1; \
- float16x4x4_t __ret; \
- __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 8); \
- __ret; \
- })
- #else
- #define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x4x4_t __s1 = __p1; \
- float16x4x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
- float16x4x4_t __ret; \
- __builtin_neon_vld4_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 8); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2x4_t __s1 = __p1; \
- int32x2x4_t __ret; \
- __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 2); \
- __ret; \
- })
- #else
- #define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2x4_t __s1 = __p1; \
- int32x2x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
- int32x2x4_t __ret; \
- __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 2); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4x4_t __s1 = __p1; \
- int16x4x4_t __ret; \
- __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 1); \
- __ret; \
- })
- #else
- #define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4x4_t __s1 = __p1; \
- int16x4x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
- int16x4x4_t __ret; \
- __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 1); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
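A minimal usage sketch of the vld4_lane family defined above; the function name demo_vld4_lane and its arguments are illustrative only, not part of the header, and the code assumes a NEON-enabled ARM target.

#include <arm_neon.h>

/* Loads four consecutive floats from p, writing one into lane 1 of each of
   the four vectors in acc; all other lanes keep the values from acc. The
   lane index must be a compile-time constant in range for the vector type. */
float32x2x4_t demo_vld4_lane(const float *p, float32x2x4_t acc) {
  return vld4_lane_f32(p, acc, 1);
}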
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
- return __ret;
- }
- #else
- __ai int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
- return __ret;
- }
- #else
- __ai float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
- return __ret;
- }
- #else
- __ai int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
- return __ret;
- }
- #else
- __ai float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
- return __ret;
- }
- #else
- __ai int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
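For reference, a hedged usage sketch of the element-wise maximum intrinsics above (demo_vmax is an illustrative name, not part of the header).

#include <arm_neon.h>

/* Each lane of the result is the larger of the corresponding lanes of a and b. */
uint8x16_t demo_vmax(uint8x16_t a, uint8x16_t b) {
  return vmaxq_u8(a, b);
}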
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
- return __ret;
- }
- #else
- __ai int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
- return __ret;
- }
- #else
- __ai float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
- return __ret;
- }
- #else
- __ai int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
- return __ret;
- }
- #else
- __ai float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
- return __ret;
- }
- #else
- __ai int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
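The minimum intrinsics mirror the maximum family; a brief illustrative sketch (demo_vmin is not part of the header).

#include <arm_neon.h>

/* Each lane of the result is the smaller of the corresponding lanes of a and b. */
float32x2_t demo_vmin(float32x2_t a, float32x2_t b) {
  return vmin_f32(a, b);
}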
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
- uint8x16_t __ret;
- __ret = __p0 + __p1 * __p2;
- return __ret;
- }
- #else
- __ai uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = __rev0 + __rev1 * __rev2;
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
- uint32x4_t __ret;
- __ret = __p0 + __p1 * __p2;
- return __ret;
- }
- #else
- __ai uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __rev0 + __rev1 * __rev2;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
- uint16x8_t __ret;
- __ret = __p0 + __p1 * __p2;
- return __ret;
- }
- #else
- __ai uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __rev0 + __rev1 * __rev2;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
- int8x16_t __ret;
- __ret = __p0 + __p1 * __p2;
- return __ret;
- }
- #else
- __ai int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = __rev0 + __rev1 * __rev2;
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
- float32x4_t __ret;
- __ret = __p0 + __p1 * __p2;
- return __ret;
- }
- #else
- __ai float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = __rev0 + __rev1 * __rev2;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
- int32x4_t __ret;
- __ret = __p0 + __p1 * __p2;
- return __ret;
- }
- #else
- __ai int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __rev0 + __rev1 * __rev2;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
- int16x8_t __ret;
- __ret = __p0 + __p1 * __p2;
- return __ret;
- }
- #else
- __ai int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __rev0 + __rev1 * __rev2;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
- uint8x8_t __ret;
- __ret = __p0 + __p1 * __p2;
- return __ret;
- }
- #else
- __ai uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = __rev0 + __rev1 * __rev2;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
- uint32x2_t __ret;
- __ret = __p0 + __p1 * __p2;
- return __ret;
- }
- #else
- __ai uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
- uint32x2_t __ret;
- __ret = __rev0 + __rev1 * __rev2;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
- uint16x4_t __ret;
- __ret = __p0 + __p1 * __p2;
- return __ret;
- }
- #else
- __ai uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = __rev0 + __rev1 * __rev2;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
- int8x8_t __ret;
- __ret = __p0 + __p1 * __p2;
- return __ret;
- }
- #else
- __ai int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = __rev0 + __rev1 * __rev2;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
- float32x2_t __ret;
- __ret = __p0 + __p1 * __p2;
- return __ret;
- }
- #else
- __ai float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
- float32x2_t __ret;
- __ret = __rev0 + __rev1 * __rev2;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
- int32x2_t __ret;
- __ret = __p0 + __p1 * __p2;
- return __ret;
- }
- #else
- __ai int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
- int32x2_t __ret;
- __ret = __rev0 + __rev1 * __rev2;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
- int16x4_t __ret;
- __ret = __p0 + __p1 * __p2;
- return __ret;
- }
- #else
- __ai int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = __rev0 + __rev1 * __rev2;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
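A short illustrative sketch of the multiply-accumulate forms above, which expand to acc + a * b lane-wise (demo_vmla is not part of the header).

#include <arm_neon.h>

/* Lane-wise multiply-accumulate: each result lane is acc + a * b. */
int32x4_t demo_vmla(int32x4_t acc, int32x4_t a, int32x4_t b) {
  return vmlaq_s32(acc, a, b);
}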
- #ifdef __LITTLE_ENDIAN__
- #define vmlaq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __s1 = __p1; \
- uint32x2_t __s2 = __p2; \
- uint32x4_t __ret; \
- __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmlaq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __s1 = __p1; \
- uint32x2_t __s2 = __p2; \
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
- uint32x4_t __ret; \
- __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlaq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __s1 = __p1; \
- uint16x4_t __s2 = __p2; \
- uint16x8_t __ret; \
- __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmlaq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __s1 = __p1; \
- uint16x4_t __s2 = __p2; \
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- uint16x8_t __ret; \
- __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
- float32x4_t __s0 = __p0; \
- float32x4_t __s1 = __p1; \
- float32x2_t __s2 = __p2; \
- float32x4_t __ret; \
- __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmlaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
- float32x4_t __s0 = __p0; \
- float32x4_t __s1 = __p1; \
- float32x2_t __s2 = __p2; \
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
- float32x4_t __ret; \
- __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlaq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x2_t __s2 = __p2; \
- int32x4_t __ret; \
- __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmlaq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x2_t __s2 = __p2; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
- int32x4_t __ret; \
- __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlaq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x4_t __s2 = __p2; \
- int16x8_t __ret; \
- __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmlaq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x4_t __s2 = __p2; \
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- int16x8_t __ret; \
- __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmla_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x2_t __s1 = __p1; \
- uint32x2_t __s2 = __p2; \
- uint32x2_t __ret; \
- __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmla_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x2_t __s1 = __p1; \
- uint32x2_t __s2 = __p2; \
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
- uint32x2_t __ret; \
- __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmla_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x4_t __s1 = __p1; \
- uint16x4_t __s2 = __p2; \
- uint16x4_t __ret; \
- __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmla_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x4_t __s1 = __p1; \
- uint16x4_t __s2 = __p2; \
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- uint16x4_t __ret; \
- __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmla_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
- float32x2_t __s0 = __p0; \
- float32x2_t __s1 = __p1; \
- float32x2_t __s2 = __p2; \
- float32x2_t __ret; \
- __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmla_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
- float32x2_t __s0 = __p0; \
- float32x2_t __s1 = __p1; \
- float32x2_t __s2 = __p2; \
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
- float32x2_t __ret; \
- __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmla_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x2_t __s2 = __p2; \
- int32x2_t __ret; \
- __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmla_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x2_t __s2 = __p2; \
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
- int32x2_t __ret; \
- __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmla_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x4_t __s2 = __p2; \
- int16x4_t __ret; \
- __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmla_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x4_t __s2 = __p2; \
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- int16x4_t __ret; \
- __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
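An illustrative sketch of the by-lane multiply-accumulate macros above (demo_vmla_lane is not part of the header); the selected lane of the third operand is broadcast before the multiply, and the lane index must be a constant expression.

#include <arm_neon.h>

/* acc + a * v[1], with lane 1 of v splatted across all four result lanes. */
float32x4_t demo_vmla_lane(float32x4_t acc, float32x4_t a, float32x2_t v) {
  return vmlaq_lane_f32(acc, a, v, 1);
}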
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
- uint32x4_t __ret;
- __ret = __p0 + __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
- return __ret;
- }
- #else
- __ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __rev0 + __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
- uint16x8_t __ret;
- __ret = __p0 + __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
- return __ret;
- }
- #else
- __ai uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __rev0 + __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
- float32x4_t __ret;
- __ret = __p0 + __p1 * (float32x4_t) {__p2, __p2, __p2, __p2};
- return __ret;
- }
- #else
- __ai float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = __rev0 + __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2};
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
- int32x4_t __ret;
- __ret = __p0 + __p1 * (int32x4_t) {__p2, __p2, __p2, __p2};
- return __ret;
- }
- #else
- __ai int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __rev0 + __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2};
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
- int16x8_t __ret;
- __ret = __p0 + __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
- return __ret;
- }
- #else
- __ai int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __rev0 + __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
- uint32x2_t __ret;
- __ret = __p0 + __p1 * (uint32x2_t) {__p2, __p2};
- return __ret;
- }
- #else
- __ai uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = __rev0 + __rev1 * (uint32x2_t) {__p2, __p2};
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
- uint16x4_t __ret;
- __ret = __p0 + __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
- return __ret;
- }
- #else
- __ai uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = __rev0 + __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
- float32x2_t __ret;
- __ret = __p0 + __p1 * (float32x2_t) {__p2, __p2};
- return __ret;
- }
- #else
- __ai float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float32x2_t __ret;
- __ret = __rev0 + __rev1 * (float32x2_t) {__p2, __p2};
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
- int32x2_t __ret;
- __ret = __p0 + __p1 * (int32x2_t) {__p2, __p2};
- return __ret;
- }
- #else
- __ai int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __ret;
- __ret = __rev0 + __rev1 * (int32x2_t) {__p2, __p2};
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
- int16x4_t __ret;
- __ret = __p0 + __p1 * (int16x4_t) {__p2, __p2, __p2, __p2};
- return __ret;
- }
- #else
- __ai int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = __rev0 + __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2};
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
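An illustrative sketch of the _n (by-scalar) multiply-accumulate forms above, which splat the scalar operand as shown in the definitions (demo_vmla_n is not part of the header).

#include <arm_neon.h>

/* Each result lane is acc + a * 5. */
uint32x4_t demo_vmla_n(uint32x4_t acc, uint32x4_t a) {
  return vmlaq_n_u32(acc, a, 5u);
}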
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
- uint8x16_t __ret;
- __ret = __p0 - __p1 * __p2;
- return __ret;
- }
- #else
- __ai uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = __rev0 - __rev1 * __rev2;
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
- uint32x4_t __ret;
- __ret = __p0 - __p1 * __p2;
- return __ret;
- }
- #else
- __ai uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __rev0 - __rev1 * __rev2;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
- uint16x8_t __ret;
- __ret = __p0 - __p1 * __p2;
- return __ret;
- }
- #else
- __ai uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __rev0 - __rev1 * __rev2;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
- int8x16_t __ret;
- __ret = __p0 - __p1 * __p2;
- return __ret;
- }
- #else
- __ai int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = __rev0 - __rev1 * __rev2;
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
- float32x4_t __ret;
- __ret = __p0 - __p1 * __p2;
- return __ret;
- }
- #else
- __ai float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = __rev0 - __rev1 * __rev2;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
- int32x4_t __ret;
- __ret = __p0 - __p1 * __p2;
- return __ret;
- }
- #else
- __ai int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __rev0 - __rev1 * __rev2;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
- int16x8_t __ret;
- __ret = __p0 - __p1 * __p2;
- return __ret;
- }
- #else
- __ai int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __rev0 - __rev1 * __rev2;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
- uint8x8_t __ret;
- __ret = __p0 - __p1 * __p2;
- return __ret;
- }
- #else
- __ai uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = __rev0 - __rev1 * __rev2;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
- uint32x2_t __ret;
- __ret = __p0 - __p1 * __p2;
- return __ret;
- }
- #else
- __ai uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
- uint32x2_t __ret;
- __ret = __rev0 - __rev1 * __rev2;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
- uint16x4_t __ret;
- __ret = __p0 - __p1 * __p2;
- return __ret;
- }
- #else
- __ai uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = __rev0 - __rev1 * __rev2;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
- int8x8_t __ret;
- __ret = __p0 - __p1 * __p2;
- return __ret;
- }
- #else
- __ai int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = __rev0 - __rev1 * __rev2;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
- float32x2_t __ret;
- __ret = __p0 - __p1 * __p2;
- return __ret;
- }
- #else
- __ai float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
- float32x2_t __ret;
- __ret = __rev0 - __rev1 * __rev2;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
- int32x2_t __ret;
- __ret = __p0 - __p1 * __p2;
- return __ret;
- }
- #else
- __ai int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
- int32x2_t __ret;
- __ret = __rev0 - __rev1 * __rev2;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
- int16x4_t __ret;
- __ret = __p0 - __p1 * __p2;
- return __ret;
- }
- #else
- __ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = __rev0 - __rev1 * __rev2;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
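The multiply-subtract forms mirror vmla with a subtraction; a brief illustrative sketch (demo_vmls is not part of the header).

#include <arm_neon.h>

/* Lane-wise multiply-subtract: each result lane is acc - a * b. */
float32x2_t demo_vmls(float32x2_t acc, float32x2_t a, float32x2_t b) {
  return vmls_f32(acc, a, b);
}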
- #ifdef __LITTLE_ENDIAN__
- #define vmlsq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __s1 = __p1; \
- uint32x2_t __s2 = __p2; \
- uint32x4_t __ret; \
- __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmlsq_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __s1 = __p1; \
- uint32x2_t __s2 = __p2; \
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
- uint32x4_t __ret; \
- __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlsq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __s1 = __p1; \
- uint16x4_t __s2 = __p2; \
- uint16x8_t __ret; \
- __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmlsq_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __s1 = __p1; \
- uint16x4_t __s2 = __p2; \
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- uint16x8_t __ret; \
- __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlsq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
- float32x4_t __s0 = __p0; \
- float32x4_t __s1 = __p1; \
- float32x2_t __s2 = __p2; \
- float32x4_t __ret; \
- __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmlsq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
- float32x4_t __s0 = __p0; \
- float32x4_t __s1 = __p1; \
- float32x2_t __s2 = __p2; \
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
- float32x4_t __ret; \
- __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlsq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x2_t __s2 = __p2; \
- int32x4_t __ret; \
- __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmlsq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x2_t __s2 = __p2; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
- int32x4_t __ret; \
- __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlsq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x4_t __s2 = __p2; \
- int16x8_t __ret; \
- __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmlsq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x4_t __s2 = __p2; \
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- int16x8_t __ret; \
- __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmls_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x2_t __s1 = __p1; \
- uint32x2_t __s2 = __p2; \
- uint32x2_t __ret; \
- __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmls_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x2_t __s1 = __p1; \
- uint32x2_t __s2 = __p2; \
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
- uint32x2_t __ret; \
- __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmls_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x4_t __s1 = __p1; \
- uint16x4_t __s2 = __p2; \
- uint16x4_t __ret; \
- __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmls_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x4_t __s1 = __p1; \
- uint16x4_t __s2 = __p2; \
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- uint16x4_t __ret; \
- __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmls_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
- float32x2_t __s0 = __p0; \
- float32x2_t __s1 = __p1; \
- float32x2_t __s2 = __p2; \
- float32x2_t __ret; \
- __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmls_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
- float32x2_t __s0 = __p0; \
- float32x2_t __s1 = __p1; \
- float32x2_t __s2 = __p2; \
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
- float32x2_t __ret; \
- __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x2_t __s2 = __p2; \
- int32x2_t __ret; \
- __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x2_t __s2 = __p2; \
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
- int32x2_t __ret; \
- __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmls_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x4_t __s2 = __p2; \
- int16x4_t __ret; \
- __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmls_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x4_t __s2 = __p2; \
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- int16x4_t __ret; \
- __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
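The _lane forms splat one compile-time-constant lane of the last vector operand before the same subtract-multiply, which is why they are macros rather than __ai functions. A small illustrative sketch (hypothetical harness, not part of the patch):

#include <arm_neon.h>
#include <stdio.h>

int main(void) {
    /* vmls_lane_f32(acc, b, v, 1) computes acc - b * v[1] in each lane. */
    float acc[2] = {1.0f, 2.0f}, b[2] = {3.0f, 4.0f}, v[2] = {10.0f, 100.0f}, out[2];
    float32x2_t r = vmls_lane_f32(vld1_f32(acc), vld1_f32(b), vld1_f32(v), 1);
    vst1_f32(out, r);
    printf("%f %f\n", out[0], out[1]);   /* -299.000000 -398.000000 */
    return 0;
}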
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
- uint32x4_t __ret;
- __ret = __p0 - __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
- return __ret;
- }
- #else
- __ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __rev0 - __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
- uint16x8_t __ret;
- __ret = __p0 - __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
- return __ret;
- }
- #else
- __ai uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __rev0 - __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
- float32x4_t __ret;
- __ret = __p0 - __p1 * (float32x4_t) {__p2, __p2, __p2, __p2};
- return __ret;
- }
- #else
- __ai float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = __rev0 - __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2};
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
- int32x4_t __ret;
- __ret = __p0 - __p1 * (int32x4_t) {__p2, __p2, __p2, __p2};
- return __ret;
- }
- #else
- __ai int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __rev0 - __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2};
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
- int16x8_t __ret;
- __ret = __p0 - __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
- return __ret;
- }
- #else
- __ai int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __rev0 - __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
- uint32x2_t __ret;
- __ret = __p0 - __p1 * (uint32x2_t) {__p2, __p2};
- return __ret;
- }
- #else
- __ai uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = __rev0 - __rev1 * (uint32x2_t) {__p2, __p2};
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
- uint16x4_t __ret;
- __ret = __p0 - __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
- return __ret;
- }
- #else
- __ai uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = __rev0 - __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
- float32x2_t __ret;
- __ret = __p0 - __p1 * (float32x2_t) {__p2, __p2};
- return __ret;
- }
- #else
- __ai float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float32x2_t __ret;
- __ret = __rev0 - __rev1 * (float32x2_t) {__p2, __p2};
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
- int32x2_t __ret;
- __ret = __p0 - __p1 * (int32x2_t) {__p2, __p2};
- return __ret;
- }
- #else
- __ai int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __ret;
- __ret = __rev0 - __rev1 * (int32x2_t) {__p2, __p2};
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
- int16x4_t __ret;
- __ret = __p0 - __p1 * (int16x4_t) {__p2, __p2, __p2, __p2};
- return __ret;
- }
- #else
- __ai int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = __rev0 - __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2};
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
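The _n forms broadcast a scalar through a vector literal, so vmlsq_n_f32(a, b, s) behaves like vmlsq_f32(a, b, vmovq_n_f32(s)). A short example (illustrative harness only):

#include <arm_neon.h>
#include <stdio.h>

int main(void) {
    /* a - b * 0.5f in every lane. */
    float a[4] = {1.0f, 2.0f, 3.0f, 4.0f}, b[4] = {1.0f, 1.0f, 1.0f, 1.0f}, out[4];
    float32x4_t r = vmlsq_n_f32(vld1q_f32(a), vld1q_f32(b), 0.5f);
    vst1q_f32(out, r);
    printf("%f %f %f %f\n", out[0], out[1], out[2], out[3]);   /* 0.5 1.5 2.5 3.5 */
    return 0;
}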
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vmov_n_p8(poly8_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
- return __ret;
- }
- #else
- __ai poly8x8_t vmov_n_p8(poly8_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4_t vmov_n_p16(poly16_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
- return __ret;
- }
- #else
- __ai poly16x4_t vmov_n_p16(poly16_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vmovq_n_p8(poly8_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
- return __ret;
- }
- #else
- __ai poly8x16_t vmovq_n_p8(poly8_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8_t vmovq_n_p16(poly16_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
- return __ret;
- }
- #else
- __ai poly16x8_t vmovq_n_p16(poly16_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vmovq_n_u8(uint8_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
- return __ret;
- }
- #else
- __ai uint8x16_t vmovq_n_u8(uint8_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vmovq_n_u32(uint32_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
- return __ret;
- }
- #else
- __ai uint32x4_t vmovq_n_u32(uint32_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vmovq_n_u64(uint64_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) {__p0, __p0};
- return __ret;
- }
- #else
- __ai uint64x2_t vmovq_n_u64(uint64_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) {__p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vmovq_n_u16(uint16_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
- return __ret;
- }
- #else
- __ai uint16x8_t vmovq_n_u16(uint16_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vmovq_n_s8(int8_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
- return __ret;
- }
- #else
- __ai int8x16_t vmovq_n_s8(int8_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vmovq_n_f32(float32_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
- return __ret;
- }
- #else
- __ai float32x4_t vmovq_n_f32(float32_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmovq_n_f16(__p0) __extension__ ({ \
- float16_t __s0 = __p0; \
- float16x8_t __ret; \
- __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
- __ret; \
- })
- #else
- #define vmovq_n_f16(__p0) __extension__ ({ \
- float16_t __s0 = __p0; \
- float16x8_t __ret; \
- __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vmovq_n_s32(int32_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
- return __ret;
- }
- #else
- __ai int32x4_t vmovq_n_s32(int32_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vmovq_n_s64(int64_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t) {__p0, __p0};
- return __ret;
- }
- #else
- __ai int64x2_t vmovq_n_s64(int64_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t) {__p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vmovq_n_s16(int16_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
- return __ret;
- }
- #else
- __ai int16x8_t vmovq_n_s16(int16_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vmov_n_u8(uint8_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
- return __ret;
- }
- #else
- __ai uint8x8_t vmov_n_u8(uint8_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vmov_n_u32(uint32_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) {__p0, __p0};
- return __ret;
- }
- #else
- __ai uint32x2_t vmov_n_u32(uint32_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) {__p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vmov_n_u64(uint64_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) {__p0};
- return __ret;
- }
- #else
- __ai uint64x1_t vmov_n_u64(uint64_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) {__p0};
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vmov_n_u16(uint16_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
- return __ret;
- }
- #else
- __ai uint16x4_t vmov_n_u16(uint16_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vmov_n_s8(int8_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
- return __ret;
- }
- #else
- __ai int8x8_t vmov_n_s8(int8_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vmov_n_f32(float32_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t) {__p0, __p0};
- return __ret;
- }
- #else
- __ai float32x2_t vmov_n_f32(float32_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t) {__p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmov_n_f16(__p0) __extension__ ({ \
- float16_t __s0 = __p0; \
- float16x4_t __ret; \
- __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
- __ret; \
- })
- #else
- #define vmov_n_f16(__p0) __extension__ ({ \
- float16_t __s0 = __p0; \
- float16x4_t __ret; \
- __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vmov_n_s32(int32_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t) {__p0, __p0};
- return __ret;
- }
- #else
- __ai int32x2_t vmov_n_s32(int32_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t) {__p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vmov_n_s64(int64_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t) {__p0};
- return __ret;
- }
- #else
- __ai int64x1_t vmov_n_s64(int64_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t) {__p0};
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vmov_n_s16(int16_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
- return __ret;
- }
- #else
- __ai int16x4_t vmov_n_s16(int16_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
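vmov_n_* and vmovq_n_* broadcast a scalar to every lane (the behaviour matches vdup_n_*); because all lanes end up identical, the extra shufflevector in the big-endian branch does not change the value. Illustrative use (hypothetical harness, not from the patch):

#include <arm_neon.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint8x8_t ones = vmov_n_u8(1);            /* {1,1,1,1,1,1,1,1} */
    float32x4_t pi4 = vmovq_n_f32(3.14159f);  /* {pi, pi, pi, pi} */
    uint8_t u[8]; float f[4];
    vst1_u8(u, ones);
    vst1q_f32(f, pi4);
    printf("%d %f\n", u[0], f[3]);            /* 1 3.141590 */
    return 0;
}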
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vmovl_u8(uint8x8_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vmovl_u8(uint8x8_t __p0) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai uint16x8_t __noswap_vmovl_u8(uint8x8_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vmovl_u32(uint32x2_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51);
- return __ret;
- }
- #else
- __ai uint64x2_t vmovl_u32(uint32x2_t __p0) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 51);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai uint64x2_t __noswap_vmovl_u32(uint32x2_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vmovl_u16(uint16x4_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vmovl_u16(uint16x4_t __p0) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai uint32x4_t __noswap_vmovl_u16(uint16x4_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vmovl_s8(int8x8_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33);
- return __ret;
- }
- #else
- __ai int16x8_t vmovl_s8(int8x8_t __p0) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 33);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai int16x8_t __noswap_vmovl_s8(int8x8_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vmovl_s32(int32x2_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35);
- return __ret;
- }
- #else
- __ai int64x2_t vmovl_s32(int32x2_t __p0) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 35);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai int64x2_t __noswap_vmovl_s32(int32x2_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vmovl_s16(int16x4_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vmovl_s16(int16x4_t __p0) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai int32x4_t __noswap_vmovl_s16(int16x4_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34);
- return __ret;
- }
- #endif
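vmovl_* widens each lane to twice its width, zero-extending the unsigned variants and sign-extending the signed ones; the __noswap_ helpers exist so other big-endian wrappers in this header can call the builtin without reversing twice. A quick sketch (illustrative only):

#include <arm_neon.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    int8_t narrow[8] = {-1, -2, 3, 4, 5, 6, 7, 8};
    int16_t wide[8];
    int16x8_t w = vmovl_s8(vld1_s8(narrow));  /* sign-extend each int8 lane to int16 */
    vst1q_s16(wide, w);
    printf("%d %d\n", wide[0], wide[7]);      /* -1 8 */
    return 0;
}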
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vmovn_u32(uint32x4_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vmovn_u32(uint32x4_t __p0) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai uint16x4_t __noswap_vmovn_u32(uint32x4_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vmovn_u64(uint64x2_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vmovn_u64(uint64x2_t __p0) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai uint32x2_t __noswap_vmovn_u64(uint64x2_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 18);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vmovn_u16(uint16x8_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vmovn_u16(uint16x8_t __p0) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai uint8x8_t __noswap_vmovn_u16(uint16x8_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vmovn_s32(int32x4_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1);
- return __ret;
- }
- #else
- __ai int16x4_t vmovn_s32(int32x4_t __p0) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai int16x4_t __noswap_vmovn_s32(int32x4_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vmovn_s64(int64x2_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vmovn_s64(int64x2_t __p0) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai int32x2_t __noswap_vmovn_s64(int64x2_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vmovn_s16(int16x8_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vmovn_s16(int16x8_t __p0) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai int8x8_t __noswap_vmovn_s16(int16x8_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0);
- return __ret;
- }
- #endif
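vmovn_* goes the other way: it keeps only the low half of each lane, so out-of-range values are truncated rather than saturated (the saturating counterpart is vqmovn_*). For example (illustrative harness):

#include <arm_neon.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint16_t wide[8] = {1, 2, 3, 255, 256, 257, 65535, 0};
    uint8_t narrow[8];
    uint8x8_t n = vmovn_u16(vld1q_u16(wide)); /* keep the low 8 bits of each lane */
    vst1_u8(narrow, n);
    printf("%d %d %d\n", narrow[3], narrow[4], narrow[6]);  /* 255 0 255 */
    return 0;
}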
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __ret;
- __ret = __p0 * __p1;
- return __ret;
- }
- #else
- __ai uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = __rev0 * __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __ret;
- __ret = __p0 * __p1;
- return __ret;
- }
- #else
- __ai uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __rev0 * __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __ret;
- __ret = __p0 * __p1;
- return __ret;
- }
- #else
- __ai uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __rev0 * __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __ret;
- __ret = __p0 * __p1;
- return __ret;
- }
- #else
- __ai int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = __rev0 * __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __ret;
- __ret = __p0 * __p1;
- return __ret;
- }
- #else
- __ai float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = __rev0 * __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __ret;
- __ret = __p0 * __p1;
- return __ret;
- }
- #else
- __ai int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __rev0 * __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __ret;
- __ret = __p0 * __p1;
- return __ret;
- }
- #else
- __ai int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __rev0 * __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = __p0 * __p1;
- return __ret;
- }
- #else
- __ai uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = __rev0 * __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __ret;
- __ret = __p0 * __p1;
- return __ret;
- }
- #else
- __ai uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = __rev0 * __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __ret;
- __ret = __p0 * __p1;
- return __ret;
- }
- #else
- __ai uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = __rev0 * __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __ret;
- __ret = __p0 * __p1;
- return __ret;
- }
- #else
- __ai int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = __rev0 * __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __ret;
- __ret = __p0 * __p1;
- return __ret;
- }
- #else
- __ai float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float32x2_t __ret;
- __ret = __rev0 * __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __ret;
- __ret = __p0 * __p1;
- return __ret;
- }
- #else
- __ai int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __ret;
- __ret = __rev0 * __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __ret;
- __ret = __p0 * __p1;
- return __ret;
- }
- #else
- __ai int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = __rev0 * __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) {
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__p0, (int8x8_t)__p1, 4);
- return __ret;
- }
- #else
- __ai poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) {
- poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) {
- poly8x16_t __ret;
- __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__p0, (int8x16_t)__p1, 36);
- return __ret;
- }
- #else
- __ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) {
- poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __ret;
- __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
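The plain vmul*/vmulq* variants compile to the C multiply operator lane by lane, while the poly8 variants route through a builtin because polynomial (carry-less) multiplication has no C operator. A short sketch of the ordinary case (illustrative only):

#include <arm_neon.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    int16_t a[4] = {1, 2, 3, 4}, b[4] = {10, 20, 30, 40}, out[4];
    int16x4_t prod = vmul_s16(vld1_s16(a), vld1_s16(b));  /* {10, 40, 90, 160} */
    vst1_s16(out, prod);
    printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);
    return 0;
}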
- #ifdef __LITTLE_ENDIAN__
- #define vmulq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x2_t __s1 = __p1; \
- uint32x4_t __ret; \
- __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
- __ret; \
- })
- #else
- #define vmulq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x2_t __s1 = __p1; \
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- uint32x4_t __ret; \
- __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmulq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x4_t __s1 = __p1; \
- uint16x8_t __ret; \
- __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
- __ret; \
- })
- #else
- #define vmulq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x4_t __s1 = __p1; \
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- uint16x8_t __ret; \
- __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmulq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x4_t __s0 = __p0; \
- float32x2_t __s1 = __p1; \
- float32x4_t __ret; \
- __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
- __ret; \
- })
- #else
- #define vmulq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x4_t __s0 = __p0; \
- float32x2_t __s1 = __p1; \
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- float32x4_t __ret; \
- __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmulq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x4_t __ret; \
- __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
- __ret; \
- })
- #else
- #define vmulq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int32x4_t __ret; \
- __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmulq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x8_t __ret; \
- __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
- __ret; \
- })
- #else
- #define vmulq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int16x8_t __ret; \
- __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmul_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x2_t __s1 = __p1; \
- uint32x2_t __ret; \
- __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
- __ret; \
- })
- #else
- #define vmul_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x2_t __s1 = __p1; \
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- uint32x2_t __ret; \
- __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmul_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x4_t __s1 = __p1; \
- uint16x4_t __ret; \
- __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
- __ret; \
- })
- #else
- #define vmul_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x4_t __s1 = __p1; \
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- uint16x4_t __ret; \
- __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmul_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x2_t __s0 = __p0; \
- float32x2_t __s1 = __p1; \
- float32x2_t __ret; \
- __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
- __ret; \
- })
- #else
- #define vmul_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x2_t __s0 = __p0; \
- float32x2_t __s1 = __p1; \
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- float32x2_t __ret; \
- __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmul_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x2_t __ret; \
- __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
- __ret; \
- })
- #else
- #define vmul_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int32x2_t __ret; \
- __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmul_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x4_t __ret; \
- __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
- __ret; \
- })
- #else
- #define vmul_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int16x4_t __ret; \
- __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
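As with vmls_lane above, the vmul_lane macros splat a single constant lane of the second operand, the usual building block for scaling a vector by one coefficient of a filter or matrix row. Illustrative sketch (hypothetical harness):

#include <arm_neon.h>
#include <stdio.h>

int main(void) {
    float row[4] = {1.0f, 2.0f, 3.0f, 4.0f}, coeff[2] = {0.5f, 10.0f}, out[4];
    /* Multiply every lane of `row` by coeff[1]. */
    float32x4_t r = vmulq_lane_f32(vld1q_f32(row), vld1_f32(coeff), 1);
    vst1q_f32(out, r);
    printf("%f %f %f %f\n", out[0], out[1], out[2], out[3]);  /* 10 20 30 40 */
    return 0;
}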
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) {
- uint32x4_t __ret;
- __ret = __p0 * (uint32x4_t) {__p1, __p1, __p1, __p1};
- return __ret;
- }
- #else
- __ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __rev0 * (uint32x4_t) {__p1, __p1, __p1, __p1};
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) {
- uint16x8_t __ret;
- __ret = __p0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
- return __ret;
- }
- #else
- __ai uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __rev0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) {
- float32x4_t __ret;
- __ret = __p0 * (float32x4_t) {__p1, __p1, __p1, __p1};
- return __ret;
- }
- #else
- __ai float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = __rev0 * (float32x4_t) {__p1, __p1, __p1, __p1};
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) {
- int32x4_t __ret;
- __ret = __p0 * (int32x4_t) {__p1, __p1, __p1, __p1};
- return __ret;
- }
- #else
- __ai int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __rev0 * (int32x4_t) {__p1, __p1, __p1, __p1};
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) {
- int16x8_t __ret;
- __ret = __p0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
- return __ret;
- }
- #else
- __ai int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __rev0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) {
- uint32x2_t __ret;
- __ret = __p0 * (uint32x2_t) {__p1, __p1};
- return __ret;
- }
- #else
- __ai uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __ret;
- __ret = __rev0 * (uint32x2_t) {__p1, __p1};
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) {
- uint16x4_t __ret;
- __ret = __p0 * (uint16x4_t) {__p1, __p1, __p1, __p1};
- return __ret;
- }
- #else
- __ai uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = __rev0 * (uint16x4_t) {__p1, __p1, __p1, __p1};
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) {
- float32x2_t __ret;
- __ret = __p0 * (float32x2_t) {__p1, __p1};
- return __ret;
- }
- #else
- __ai float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __ret;
- __ret = __rev0 * (float32x2_t) {__p1, __p1};
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) {
- int32x2_t __ret;
- __ret = __p0 * (int32x2_t) {__p1, __p1};
- return __ret;
- }
- #else
- __ai int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __ret;
- __ret = __rev0 * (int32x2_t) {__p1, __p1};
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) {
- int16x4_t __ret;
- __ret = __p0 * (int16x4_t) {__p1, __p1, __p1, __p1};
- return __ret;
- }
- #else
- __ai int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = __rev0 * (int16x4_t) {__p1, __p1, __p1, __p1};
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) {
- poly16x8_t __ret;
- __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37);
- return __ret;
- }
- #else
- __ai poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) {
- poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- poly16x8_t __ret;
- __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 37);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai poly16x8_t __noswap_vmull_p8(poly8x8_t __p0, poly8x8_t __p1) {
- poly16x8_t __ret;
- __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai uint16x8_t __noswap_vmull_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51);
- return __ret;
- }
- #else
- __ai uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 51);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai uint64x2_t __noswap_vmull_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai uint32x4_t __noswap_vmull_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33);
- return __ret;
- }
- #else
- __ai int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 33);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai int16x8_t __noswap_vmull_s8(int8x8_t __p0, int8x8_t __p1) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) {
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
- return __ret;
- }
- #else
- __ai int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai int64x2_t __noswap_vmull_s32(int32x2_t __p0, int32x2_t __p1) {
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai int32x4_t __noswap_vmull_s16(int16x4_t __p0, int16x4_t __p1) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
- return __ret;
- }
- #endif
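- /* Usage sketch (illustrative only, assuming an ARM target with NEON enabled; not part of
-  * the generated header): vmull_* is a lane-wise widening multiply, so the product of two
-  * 16-bit lanes lands in a 32-bit lane and cannot overflow.
-  *
-  *   int16x4_t a = vdup_n_s16(300);
-  *   int16x4_t b = vdup_n_s16(300);
-  *   int32x4_t w = vmull_s16(a, b);   // every lane holds 90000, too large for int16_t
-  */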
- #ifdef __LITTLE_ENDIAN__
- #define vmull_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x2_t __s1 = __p1; \
- uint64x2_t __ret; \
- __ret = vmull_u32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vmull_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x2_t __s1 = __p1; \
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- uint64x2_t __ret; \
- __ret = __noswap_vmull_u32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmull_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x4_t __s1 = __p1; \
- uint32x4_t __ret; \
- __ret = vmull_u16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vmull_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x4_t __s1 = __p1; \
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- uint32x4_t __ret; \
- __ret = __noswap_vmull_u16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int64x2_t __ret; \
- __ret = vmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int64x2_t __ret; \
- __ret = __noswap_vmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int32x4_t __ret; \
- __ret = vmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = __noswap_vmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
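- /* Usage sketch (illustrative only): vmull_lane_* multiplies every lane of the first vector
-  * by one lane of the second, selected by a compile-time-constant index, and widens the
-  * result.
-  *
-  *   int16x4_t a = {1, 2, 3, 4};
-  *   int16x4_t v = {10, 20, 30, 40};
-  *   int32x4_t w = vmull_lane_s16(a, v, 2);   // {30, 60, 90, 120}; every lane uses lane 2 of v (30)
-  */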
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint32x2_t) {__p1, __p1}, 51);
- return __ret;
- }
- #else
- __ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(uint32x2_t) {__p1, __p1}, 51);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai uint64x2_t __noswap_vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint32x2_t) {__p1, __p1}, 51);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint16x4_t) {__p1, __p1, __p1, __p1}, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(uint16x4_t) {__p1, __p1, __p1, __p1}, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai uint32x4_t __noswap_vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(uint16x4_t) {__p1, __p1, __p1, __p1}, 50);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) {
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
- return __ret;
- }
- #else
- __ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai int64x2_t __noswap_vmull_n_s32(int32x2_t __p0, int32_t __p1) {
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai int32x4_t __noswap_vmull_n_s16(int16x4_t __p0, int16_t __p1) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
- return __ret;
- }
- #endif
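- /* Usage sketch (illustrative only): vmull_n_* multiplies every lane by a scalar and widens
-  * the result, the scalar counterpart of vmull_lane_*.
-  *
-  *   uint32x2_t a = vdup_n_u32(0xFFFFFFFFu);
-  *   uint64x2_t w = vmull_n_u32(a, 2u);   // each 64-bit lane holds 0x1FFFFFFFE, no truncation
-  */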
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vmvn_p8(poly8x8_t __p0) {
- poly8x8_t __ret;
- __ret = ~__p0;
- return __ret;
- }
- #else
- __ai poly8x8_t vmvn_p8(poly8x8_t __p0) {
- poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __ret;
- __ret = ~__rev0;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vmvnq_p8(poly8x16_t __p0) {
- poly8x16_t __ret;
- __ret = ~__p0;
- return __ret;
- }
- #else
- __ai poly8x16_t vmvnq_p8(poly8x16_t __p0) {
- poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __ret;
- __ret = ~__rev0;
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vmvnq_u8(uint8x16_t __p0) {
- uint8x16_t __ret;
- __ret = ~__p0;
- return __ret;
- }
- #else
- __ai uint8x16_t vmvnq_u8(uint8x16_t __p0) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = ~__rev0;
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vmvnq_u32(uint32x4_t __p0) {
- uint32x4_t __ret;
- __ret = ~__p0;
- return __ret;
- }
- #else
- __ai uint32x4_t vmvnq_u32(uint32x4_t __p0) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = ~__rev0;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vmvnq_u16(uint16x8_t __p0) {
- uint16x8_t __ret;
- __ret = ~__p0;
- return __ret;
- }
- #else
- __ai uint16x8_t vmvnq_u16(uint16x8_t __p0) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = ~__rev0;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vmvnq_s8(int8x16_t __p0) {
- int8x16_t __ret;
- __ret = ~__p0;
- return __ret;
- }
- #else
- __ai int8x16_t vmvnq_s8(int8x16_t __p0) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = ~__rev0;
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vmvnq_s32(int32x4_t __p0) {
- int32x4_t __ret;
- __ret = ~__p0;
- return __ret;
- }
- #else
- __ai int32x4_t vmvnq_s32(int32x4_t __p0) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = ~__rev0;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vmvnq_s16(int16x8_t __p0) {
- int16x8_t __ret;
- __ret = ~__p0;
- return __ret;
- }
- #else
- __ai int16x8_t vmvnq_s16(int16x8_t __p0) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = ~__rev0;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vmvn_u8(uint8x8_t __p0) {
- uint8x8_t __ret;
- __ret = ~__p0;
- return __ret;
- }
- #else
- __ai uint8x8_t vmvn_u8(uint8x8_t __p0) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = ~__rev0;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vmvn_u32(uint32x2_t __p0) {
- uint32x2_t __ret;
- __ret = ~__p0;
- return __ret;
- }
- #else
- __ai uint32x2_t vmvn_u32(uint32x2_t __p0) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __ret;
- __ret = ~__rev0;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vmvn_u16(uint16x4_t __p0) {
- uint16x4_t __ret;
- __ret = ~__p0;
- return __ret;
- }
- #else
- __ai uint16x4_t vmvn_u16(uint16x4_t __p0) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = ~__rev0;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vmvn_s8(int8x8_t __p0) {
- int8x8_t __ret;
- __ret = ~__p0;
- return __ret;
- }
- #else
- __ai int8x8_t vmvn_s8(int8x8_t __p0) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = ~__rev0;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vmvn_s32(int32x2_t __p0) {
- int32x2_t __ret;
- __ret = ~__p0;
- return __ret;
- }
- #else
- __ai int32x2_t vmvn_s32(int32x2_t __p0) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __ret;
- __ret = ~__rev0;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vmvn_s16(int16x4_t __p0) {
- int16x4_t __ret;
- __ret = ~__p0;
- return __ret;
- }
- #else
- __ai int16x4_t vmvn_s16(int16x4_t __p0) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = ~__rev0;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
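- /* Usage sketch (illustrative only): vmvn/vmvnq is a plain bitwise NOT of every lane,
-  * implemented above with the C ~ operator on the vector type.
-  *
-  *   uint8x8_t m = vdup_n_u8(0x0F);
-  *   uint8x8_t n = vmvn_u8(m);   // every lane == 0xF0
-  */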
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vnegq_s8(int8x16_t __p0) {
- int8x16_t __ret;
- __ret = -__p0;
- return __ret;
- }
- #else
- __ai int8x16_t vnegq_s8(int8x16_t __p0) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = -__rev0;
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vnegq_f32(float32x4_t __p0) {
- float32x4_t __ret;
- __ret = -__p0;
- return __ret;
- }
- #else
- __ai float32x4_t vnegq_f32(float32x4_t __p0) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = -__rev0;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vnegq_s32(int32x4_t __p0) {
- int32x4_t __ret;
- __ret = -__p0;
- return __ret;
- }
- #else
- __ai int32x4_t vnegq_s32(int32x4_t __p0) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = -__rev0;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vnegq_s16(int16x8_t __p0) {
- int16x8_t __ret;
- __ret = -__p0;
- return __ret;
- }
- #else
- __ai int16x8_t vnegq_s16(int16x8_t __p0) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = -__rev0;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vneg_s8(int8x8_t __p0) {
- int8x8_t __ret;
- __ret = -__p0;
- return __ret;
- }
- #else
- __ai int8x8_t vneg_s8(int8x8_t __p0) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = -__rev0;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vneg_f32(float32x2_t __p0) {
- float32x2_t __ret;
- __ret = -__p0;
- return __ret;
- }
- #else
- __ai float32x2_t vneg_f32(float32x2_t __p0) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __ret;
- __ret = -__rev0;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vneg_s32(int32x2_t __p0) {
- int32x2_t __ret;
- __ret = -__p0;
- return __ret;
- }
- #else
- __ai int32x2_t vneg_s32(int32x2_t __p0) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __ret;
- __ret = -__rev0;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vneg_s16(int16x4_t __p0) {
- int16x4_t __ret;
- __ret = -__p0;
- return __ret;
- }
- #else
- __ai int16x4_t vneg_s16(int16x4_t __p0) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = -__rev0;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
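- /* Usage sketch (illustrative only): vneg/vnegq negates each lane with ordinary
-  * two's-complement (or IEEE) negation; it does not saturate, so negating the most negative
-  * integer wraps (vqneg_* is the saturating variant).
-  *
-  *   int32x2_t a = {5, -7};
-  *   int32x2_t n = vneg_s32(a);   // {-5, 7}
-  */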
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __ret;
- __ret = __p0 | ~__p1;
- return __ret;
- }
- #else
- __ai uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = __rev0 | ~__rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __ret;
- __ret = __p0 | ~__p1;
- return __ret;
- }
- #else
- __ai uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __rev0 | ~__rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __ret;
- __ret = __p0 | ~__p1;
- return __ret;
- }
- #else
- __ai uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = __rev0 | ~__rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __ret;
- __ret = __p0 | ~__p1;
- return __ret;
- }
- #else
- __ai uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __rev0 | ~__rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __ret;
- __ret = __p0 | ~__p1;
- return __ret;
- }
- #else
- __ai int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = __rev0 | ~__rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __ret;
- __ret = __p0 | ~__p1;
- return __ret;
- }
- #else
- __ai int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __rev0 | ~__rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __ret;
- __ret = __p0 | ~__p1;
- return __ret;
- }
- #else
- __ai int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int64x2_t __ret;
- __ret = __rev0 | ~__rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __ret;
- __ret = __p0 | ~__p1;
- return __ret;
- }
- #else
- __ai int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __rev0 | ~__rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = __p0 | ~__p1;
- return __ret;
- }
- #else
- __ai uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = __rev0 | ~__rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __ret;
- __ret = __p0 | ~__p1;
- return __ret;
- }
- #else
- __ai uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = __rev0 | ~__rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vorn_u64(uint64x1_t __p0, uint64x1_t __p1) {
- uint64x1_t __ret;
- __ret = __p0 | ~__p1;
- return __ret;
- }
- #else
- __ai uint64x1_t vorn_u64(uint64x1_t __p0, uint64x1_t __p1) {
- uint64x1_t __ret;
- __ret = __p0 | ~__p1;
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __ret;
- __ret = __p0 | ~__p1;
- return __ret;
- }
- #else
- __ai uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = __rev0 | ~__rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __ret;
- __ret = __p0 | ~__p1;
- return __ret;
- }
- #else
- __ai int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = __rev0 | ~__rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __ret;
- __ret = __p0 | ~__p1;
- return __ret;
- }
- #else
- __ai int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __ret;
- __ret = __rev0 | ~__rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vorn_s64(int64x1_t __p0, int64x1_t __p1) {
- int64x1_t __ret;
- __ret = __p0 | ~__p1;
- return __ret;
- }
- #else
- __ai int64x1_t vorn_s64(int64x1_t __p0, int64x1_t __p1) {
- int64x1_t __ret;
- __ret = __p0 | ~__p1;
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __ret;
- __ret = __p0 | ~__p1;
- return __ret;
- }
- #else
- __ai int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = __rev0 | ~__rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
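- /* Usage sketch (illustrative only): vorn/vornq computes p0 | ~p1 ("OR NOT"); note that the
-  * complement applies to the second operand only.
-  *
-  *   uint8x8_t a = vdup_n_u8(0x0F);
-  *   uint8x8_t b = vdup_n_u8(0x33);
-  *   uint8x8_t r = vorn_u8(a, b);   // 0x0F | ~0x33 == 0x0F | 0xCC == 0xCF in every lane
-  */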
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __ret;
- __ret = __p0 | __p1;
- return __ret;
- }
- #else
- __ai uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = __rev0 | __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __ret;
- __ret = __p0 | __p1;
- return __ret;
- }
- #else
- __ai uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __rev0 | __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __ret;
- __ret = __p0 | __p1;
- return __ret;
- }
- #else
- __ai uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = __rev0 | __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __ret;
- __ret = __p0 | __p1;
- return __ret;
- }
- #else
- __ai uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __rev0 | __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __ret;
- __ret = __p0 | __p1;
- return __ret;
- }
- #else
- __ai int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = __rev0 | __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __ret;
- __ret = __p0 | __p1;
- return __ret;
- }
- #else
- __ai int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __rev0 | __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __ret;
- __ret = __p0 | __p1;
- return __ret;
- }
- #else
- __ai int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int64x2_t __ret;
- __ret = __rev0 | __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __ret;
- __ret = __p0 | __p1;
- return __ret;
- }
- #else
- __ai int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __rev0 | __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = __p0 | __p1;
- return __ret;
- }
- #else
- __ai uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = __rev0 | __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __ret;
- __ret = __p0 | __p1;
- return __ret;
- }
- #else
- __ai uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = __rev0 | __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vorr_u64(uint64x1_t __p0, uint64x1_t __p1) {
- uint64x1_t __ret;
- __ret = __p0 | __p1;
- return __ret;
- }
- #else
- __ai uint64x1_t vorr_u64(uint64x1_t __p0, uint64x1_t __p1) {
- uint64x1_t __ret;
- __ret = __p0 | __p1;
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __ret;
- __ret = __p0 | __p1;
- return __ret;
- }
- #else
- __ai uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = __rev0 | __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __ret;
- __ret = __p0 | __p1;
- return __ret;
- }
- #else
- __ai int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = __rev0 | __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __ret;
- __ret = __p0 | __p1;
- return __ret;
- }
- #else
- __ai int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __ret;
- __ret = __rev0 | __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vorr_s64(int64x1_t __p0, int64x1_t __p1) {
- int64x1_t __ret;
- __ret = __p0 | __p1;
- return __ret;
- }
- #else
- __ai int64x1_t vorr_s64(int64x1_t __p0, int64x1_t __p1) {
- int64x1_t __ret;
- __ret = __p0 | __p1;
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __ret;
- __ret = __p0 | __p1;
- return __ret;
- }
- #else
- __ai int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = __rev0 | __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
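- /* Usage sketch (illustrative only): vorr/vorrq is the lane-wise inclusive OR; unlike vorn
-  * it complements neither operand.
-  *
-  *   uint16x4_t a = vdup_n_u16(0x00F0);
-  *   uint16x4_t b = vdup_n_u16(0x000F);
-  *   uint16x4_t r = vorr_u16(a, b);   // every lane == 0x00FF
-  */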
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
- return __ret;
- }
- #else
- __ai uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
- return __ret;
- }
- #else
- __ai int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) {
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
- return __ret;
- }
- #else
- __ai int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
- return __ret;
- }
- #else
- __ai uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 19);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
- return __ret;
- }
- #else
- __ai int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) {
- int64x1_t __ret;
- __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
- return __ret;
- }
- #else
- __ai int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) {
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int64x1_t __ret;
- __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 3);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
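- /* Usage sketch (illustrative only): vpadal/vpadalq ("pairwise add and accumulate long")
-  * adds each pair of adjacent narrow lanes, widens the sum, and adds it to the matching lane
-  * of the wide accumulator; a common building block for byte sums and histograms.
-  *
-  *   uint16x4_t acc = vdup_n_u16(100);
-  *   uint8x8_t  v   = {1, 2, 3, 4, 5, 6, 7, 8};
-  *   uint16x4_t r   = vpadal_u8(acc, v);   // {103, 107, 111, 115}
-  */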
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
- return __ret;
- }
- #else
- __ai float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
- return __ret;
- }
- #else
- __ai int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
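- /* Usage sketch (illustrative only): vpadd adds adjacent lanes within each operand and
-  * concatenates the two halves, keeping the element width; chaining it is a common way to
-  * reduce a vector to a single sum.
-  *
-  *   float32x2_t a = {1.0f, 2.0f};
-  *   float32x2_t b = {10.0f, 20.0f};
-  *   float32x2_t r = vpadd_f32(a, b);   // {3.0f, 30.0f}
-  */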
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vpaddlq_u8(uint8x16_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vpaddlq_u8(uint8x16_t __p0) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vpaddlq_u32(uint32x4_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 51);
- return __ret;
- }
- #else
- __ai uint64x2_t vpaddlq_u32(uint32x4_t __p0) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 51);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vpaddlq_u16(uint16x8_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vpaddlq_u16(uint16x8_t __p0) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vpaddlq_s8(int8x16_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 33);
- return __ret;
- }
- #else
- __ai int16x8_t vpaddlq_s8(int8x16_t __p0) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 33);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vpaddlq_s32(int32x4_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 35);
- return __ret;
- }
- #else
- __ai int64x2_t vpaddlq_s32(int32x4_t __p0) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 35);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vpaddlq_s16(int16x8_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vpaddlq_s16(int16x8_t __p0) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vpaddl_u8(uint8x8_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vpaddl_u8(uint8x8_t __p0) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vpaddl_u32(uint32x2_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 19);
- return __ret;
- }
- #else
- __ai uint64x1_t vpaddl_u32(uint32x2_t __p0) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 19);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vpaddl_u16(uint16x4_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vpaddl_u16(uint16x4_t __p0) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vpaddl_s8(int8x8_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 1);
- return __ret;
- }
- #else
- __ai int16x4_t vpaddl_s8(int8x8_t __p0) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vpaddl_s32(int32x2_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 3);
- return __ret;
- }
- #else
- __ai int64x1_t vpaddl_s32(int32x2_t __p0) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x1_t __ret;
- __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 3);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vpaddl_s16(int16x4_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vpaddl_s16(int16x4_t __p0) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
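- /* Usage sketch (illustrative only): vpaddl/vpaddlq ("pairwise add long") adds adjacent
-  * lanes and widens, halving the lane count; unlike vpadal there is no accumulator operand.
-  *
-  *   uint8x8_t  v = vdup_n_u8(200);
-  *   uint16x4_t r = vpaddl_u8(v);   // every lane == 400, which would not fit in a uint8_t
-  */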
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
- return __ret;
- }
- #else
- __ai float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
- return __ret;
- }
- #else
- __ai int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
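- /* Usage sketch (illustrative only): vpmax takes the maximum of each adjacent pair within
-  * each operand and concatenates the halves; applying it repeatedly yields a horizontal max
-  * reduction.
-  *
-  *   int16x4_t a = {1, 9, 4, 3};
-  *   int16x4_t b = {7, 2, 8, 8};
-  *   int16x4_t r = vpmax_s16(a, b);   // {9, 4, 7, 8}
-  */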
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
- return __ret;
- }
- #else
- __ai float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
- return __ret;
- }
- #else
- __ai int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vqabsq_s8(int8x16_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 32);
- return __ret;
- }
- #else
- __ai int8x16_t vqabsq_s8(int8x16_t __p0) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 32);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vqabsq_s32(int32x4_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vqabsq_s32(int32x4_t __p0) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vqabsq_s16(int16x8_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 33);
- return __ret;
- }
- #else
- __ai int16x8_t vqabsq_s16(int16x8_t __p0) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 33);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vqabs_s8(int8x8_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vqabs_s8(int8x8_t __p0) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vqabs_s32(int32x2_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vqabs_s32(int32x2_t __p0) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vqabs_s16(int16x4_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 1);
- return __ret;
- }
- #else
- __ai int16x4_t vqabs_s16(int16x4_t __p0) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
- return __ret;
- }
- #else
- __ai uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
- return __ret;
- }
- #else
- __ai int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai int32x4_t __noswap_vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
- return __ret;
- }
- #else
- __ai int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
- return __ret;
- }
- #else
- __ai int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai int16x8_t __noswap_vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vqadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
- return __ret;
- }
- #else
- __ai uint64x1_t vqadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai int32x2_t __noswap_vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vqadd_s64(int64x1_t __p0, int64x1_t __p1) {
- int64x1_t __ret;
- __ret = (int64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
- return __ret;
- }
- #else
- __ai int64x1_t vqadd_s64(int64x1_t __p0, int64x1_t __p1) {
- int64x1_t __ret;
- __ret = (int64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
- return __ret;
- }
- #else
- __ai int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai int16x4_t __noswap_vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
- return __ret;
- }
- #else
- __ai int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai int64x2_t __noswap_vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai int32x4_t __noswap_vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x2_t __s2 = __p2; \
- int64x2_t __ret; \
- __ret = vqdmlal_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vqdmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x2_t __s2 = __p2; \
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
- int64x2_t __ret; \
- __ret = __noswap_vqdmlal_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x4_t __s2 = __p2; \
- int32x4_t __ret; \
- __ret = vqdmlal_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vqdmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x4_t __s2 = __p2; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = __noswap_vqdmlal_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
- return __ret;
- }
- #else
- __ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai int64x2_t __noswap_vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai int32x4_t __noswap_vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
- return __ret;
- }
- #else
- __ai int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai int64x2_t __noswap_vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai int32x4_t __noswap_vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x2_t __s2 = __p2; \
- int64x2_t __ret; \
- __ret = vqdmlsl_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vqdmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x2_t __s2 = __p2; \
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
- int64x2_t __ret; \
- __ret = __noswap_vqdmlsl_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x4_t __s2 = __p2; \
- int32x4_t __ret; \
- __ret = vqdmlsl_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vqdmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x4_t __s2 = __p2; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = __noswap_vqdmlsl_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
- return __ret;
- }
- #else
- __ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai int64x2_t __noswap_vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int32x2_t) {__p2, __p2}, 35);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai int32x4_t __noswap_vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)(int16x4_t) {__p2, __p2, __p2, __p2}, 34);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai int32x4_t __noswap_vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
- return __ret;
- }
- #else
- __ai int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai int16x8_t __noswap_vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai int32x2_t __noswap_vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
- return __ret;
- }
- #else
- __ai int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai int16x4_t __noswap_vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x4_t __ret; \
- __ret = vqdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int32x4_t __ret; \
- __ret = __noswap_vqdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x8_t __ret; \
- __ret = vqdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int16x8_t __ret; \
- __ret = __noswap_vqdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x2_t __ret; \
- __ret = vqdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int32x2_t __ret; \
- __ret = __noswap_vqdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x4_t __ret; \
- __ret = vqdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int16x4_t __ret; \
- __ret = __noswap_vqdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33);
- return __ret;
- }
- #else
- __ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1);
- return __ret;
- }
- #else
- __ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) {
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
- return __ret;
- }
- #else
- __ai int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai int64x2_t __noswap_vqdmull_s32(int32x2_t __p0, int32x2_t __p1) {
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai int32x4_t __noswap_vqdmull_s16(int16x4_t __p0, int16x4_t __p1) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int64x2_t __ret; \
- __ret = vqdmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vqdmull_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int64x2_t __ret; \
- __ret = __noswap_vqdmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int32x4_t __ret; \
- __ret = vqdmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vqdmull_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = __noswap_vqdmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
- return __ret;
- }
- #else
- __ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai int64x2_t __noswap_vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 35);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai int32x4_t __noswap_vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 34);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vqmovn_u32(uint32x4_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vqmovn_u32(uint32x4_t __p0) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai uint16x4_t __noswap_vqmovn_u32(uint32x4_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vqmovn_u64(uint64x2_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vqmovn_u64(uint64x2_t __p0) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai uint32x2_t __noswap_vqmovn_u64(uint64x2_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vqmovn_u16(uint16x8_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vqmovn_u16(uint16x8_t __p0) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai uint8x8_t __noswap_vqmovn_u16(uint16x8_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vqmovn_s32(int32x4_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1);
- return __ret;
- }
- #else
- __ai int16x4_t vqmovn_s32(int32x4_t __p0) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai int16x4_t __noswap_vqmovn_s32(int32x4_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vqmovn_s64(int64x2_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vqmovn_s64(int64x2_t __p0) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai int32x2_t __noswap_vqmovn_s64(int64x2_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vqmovn_s16(int16x8_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vqmovn_s16(int16x8_t __p0) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai int8x8_t __noswap_vqmovn_s16(int16x8_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vqmovun_s32(int32x4_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vqmovun_s32(int32x4_t __p0) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai uint16x4_t __noswap_vqmovun_s32(int32x4_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vqmovun_s64(int64x2_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vqmovun_s64(int64x2_t __p0) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai uint32x2_t __noswap_vqmovun_s64(int64x2_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vqmovun_s16(int16x8_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vqmovun_s16(int16x8_t __p0) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai uint8x8_t __noswap_vqmovun_s16(int16x8_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 16);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vqnegq_s8(int8x16_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 32);
- return __ret;
- }
- #else
- __ai int8x16_t vqnegq_s8(int8x16_t __p0) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 32);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vqnegq_s32(int32x4_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vqnegq_s32(int32x4_t __p0) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vqnegq_s16(int16x8_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 33);
- return __ret;
- }
- #else
- __ai int16x8_t vqnegq_s16(int16x8_t __p0) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 33);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vqneg_s8(int8x8_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vqneg_s8(int8x8_t __p0) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vqneg_s32(int32x2_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vqneg_s32(int32x2_t __p0) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vqneg_s16(int16x4_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 1);
- return __ret;
- }
- #else
- __ai int16x4_t vqneg_s16(int16x4_t __p0) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai int32x4_t __noswap_vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
- return __ret;
- }
- #else
- __ai int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai int16x8_t __noswap_vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai int32x2_t __noswap_vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
- return __ret;
- }
- #else
- __ai int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai int16x4_t __noswap_vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x4_t __ret; \
- __ret = vqrdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int32x4_t __ret; \
- __ret = __noswap_vqrdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x8_t __ret; \
- __ret = vqrdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int16x8_t __ret; \
- __ret = __noswap_vqrdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x2_t __ret; \
- __ret = vqrdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int32x2_t __ret; \
- __ret = __noswap_vqrdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x4_t __ret; \
- __ret = vqrdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int16x4_t __ret; \
- __ret = __noswap_vqrdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
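The _lane_ forms above are macros rather than functions so that __p2 stays a compile-time constant: the selected lane of the second operand is splatted across every position with __builtin_shufflevector and then fed to the ordinary vqrdmulh(q)_s16/s32, so __p2 must be a literal lane index in range for the source vector (0-1 for int32x2_t, 0-3 for int16x4_t).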
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int32x4_t) {__p1, __p1, __p1, __p1}, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33);
- return __ret;
- }
- #else
- __ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)(int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}, 33);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)(int32x2_t) {__p1, __p1}, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1);
- return __ret;
- }
- #else
- __ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)(int16x4_t) {__p1, __p1, __p1, __p1}, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
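What each VQRDMULH lane computes can be checked against a scalar model: double the full-width product, add the rounding constant, keep the high half, and saturate. A minimal reference for the int16 element type, assuming the standard Arm definition (qrdmulh_s16_ref is a hypothetical helper, not part of the header):

#include <stdint.h>

static int16_t qrdmulh_s16_ref(int16_t a, int16_t b) {
    int64_t acc = 2 * (int64_t)a * (int64_t)b + (1 << 15); /* doubled product, rounded */
    int32_t hi  = (int32_t)(acc >> 16);                    /* high half                */
    if (hi > INT16_MAX) hi = INT16_MAX;                    /* only a == b == -32768    */
    if (hi < INT16_MIN) hi = INT16_MIN;                    /* actually saturates       */
    return (int16_t)hi;
}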
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
- return __ret;
- }
- #else
- __ai uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
- return __ret;
- }
- #else
- __ai int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
- return __ret;
- }
- #else
- __ai int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
- return __ret;
- }
- #else
- __ai int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vqrshl_u64(uint64x1_t __p0, int64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
- return __ret;
- }
- #else
- __ai uint64x1_t vqrshl_u64(uint64x1_t __p0, int64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vqrshl_s64(int64x1_t __p0, int64x1_t __p1) {
- int64x1_t __ret;
- __ret = (int64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
- return __ret;
- }
- #else
- __ai int64x1_t vqrshl_s64(int64x1_t __p0, int64x1_t __p1) {
- int64x1_t __ret;
- __ret = (int64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
- return __ret;
- }
- #else
- __ai int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
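vqrshl/vqrshlq shift each lane by the signed amount held in the corresponding lane of the second operand: non-negative amounts are saturating left shifts, negative amounts become rounding right shifts. A small usage sketch; the expected values follow the usual rounding-shift rule of adding 2^(n-1) before shifting right by n:

#include <arm_neon.h>
#include <stdio.h>

int main(void) {
    int32_t x[2] = {100, -100};
    int32_t s[2] = {-3, -3};                 /* negative => rounding right shift by 3 */
    int32_t out[2];
    vst1_s32(out, vqrshl_s32(vld1_s32(x), vld1_s32(s)));
    printf("%d %d\n", out[0], out[1]);       /* expected: 13 -12  ((100+4)>>3, (-100+4)>>3) */
    return 0;
}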
- #ifdef __LITTLE_ENDIAN__
- #define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \
- __ret; \
- })
- #else
- #define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 17); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \
- __ret; \
- })
- #else
- #define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 18); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #define __noswap_vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \
- __ret; \
- })
- #else
- #define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 16); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \
- __ret; \
- })
- #else
- #define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 1); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \
- __ret; \
- })
- #else
- #define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 2); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #define __noswap_vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int8x8_t __ret; \
- __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \
- __ret; \
- })
- #else
- #define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x8_t __ret; \
- __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 0); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int8x8_t __ret; \
- __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \
- __ret; \
- })
- #else
- #define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 17); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \
- __ret; \
- })
- #else
- #define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 18); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #define __noswap_vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \
- __ret; \
- })
- #else
- #define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 16); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \
- __ret; \
- })
- #endif
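The _n narrowing forms take a compile-time shift count, add the rounding constant, shift right, and saturate into the half-width element type; vqrshrun_n additionally converts signed input to an unsigned result. A usage sketch for vqrshrn_n_s32 (expected values assume the add-then-shift rounding described above; demo is a hypothetical wrapper):

#include <arm_neon.h>

void demo(void) {
    int32_t src[4] = {1000, -1000, 6000000, -6000000};
    int16_t dst[4];
    /* (x + 8) >> 4, then saturate to int16 */
    vst1_s16(dst, vqrshrn_n_s32(vld1q_s32(src), 4));
    /* expected: 63, -62, 32767 (saturated), -32768 (saturated) */
}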
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
- return __ret;
- }
- #else
- __ai uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
- return __ret;
- }
- #else
- __ai int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
- return __ret;
- }
- #else
- __ai int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
- return __ret;
- }
- #else
- __ai int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vqshl_u64(uint64x1_t __p0, int64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
- return __ret;
- }
- #else
- __ai uint64x1_t vqshl_u64(uint64x1_t __p0, int64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vqshl_s64(int64x1_t __p0, int64x1_t __p1) {
- int64x1_t __ret;
- __ret = (int64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
- return __ret;
- }
- #else
- __ai int64x1_t vqshl_s64(int64x1_t __p0, int64x1_t __p1) {
- int64x1_t __ret;
- __ret = (int64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
- return __ret;
- }
- #else
- __ai int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshlq_n_u8(__p0, __p1) __extension__ ({ \
- uint8x16_t __s0 = __p0; \
- uint8x16_t __ret; \
- __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 48); \
- __ret; \
- })
- #else
- #define vqshlq_n_u8(__p0, __p1) __extension__ ({ \
- uint8x16_t __s0 = __p0; \
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x16_t __ret; \
- __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 48); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshlq_n_u32(__p0, __p1) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __ret; \
- __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 50); \
- __ret; \
- })
- #else
- #define vqshlq_n_u32(__p0, __p1) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint32x4_t __ret; \
- __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 50); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshlq_n_u64(__p0, __p1) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint64x2_t __ret; \
- __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 51); \
- __ret; \
- })
- #else
- #define vqshlq_n_u64(__p0, __p1) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint64x2_t __ret; \
- __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 51); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshlq_n_u16(__p0, __p1) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __ret; \
- __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 49); \
- __ret; \
- })
- #else
- #define vqshlq_n_u16(__p0, __p1) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __ret; \
- __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 49); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshlq_n_s8(__p0, __p1) __extension__ ({ \
- int8x16_t __s0 = __p0; \
- int8x16_t __ret; \
- __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 32); \
- __ret; \
- })
- #else
- #define vqshlq_n_s8(__p0, __p1) __extension__ ({ \
- int8x16_t __s0 = __p0; \
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x16_t __ret; \
- __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 32); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshlq_n_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __ret; \
- __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 34); \
- __ret; \
- })
- #else
- #define vqshlq_n_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 34); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshlq_n_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int64x2_t __ret; \
- __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 35); \
- __ret; \
- })
- #else
- #define vqshlq_n_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int64x2_t __ret; \
- __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 35); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshlq_n_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __ret; \
- __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 33); \
- __ret; \
- })
- #else
- #define vqshlq_n_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __ret; \
- __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 33); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshl_n_u8(__p0, __p1) __extension__ ({ \
- uint8x8_t __s0 = __p0; \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 16); \
- __ret; \
- })
- #else
- #define vqshl_n_u8(__p0, __p1) __extension__ ({ \
- uint8x8_t __s0 = __p0; \
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 16); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshl_n_u32(__p0, __p1) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 18); \
- __ret; \
- })
- #else
- #define vqshl_n_u32(__p0, __p1) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 18); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshl_n_u64(__p0, __p1) __extension__ ({ \
- uint64x1_t __s0 = __p0; \
- uint64x1_t __ret; \
- __ret = (uint64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 19); \
- __ret; \
- })
- #else
- #define vqshl_n_u64(__p0, __p1) __extension__ ({ \
- uint64x1_t __s0 = __p0; \
- uint64x1_t __ret; \
- __ret = (uint64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 19); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshl_n_u16(__p0, __p1) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 17); \
- __ret; \
- })
- #else
- #define vqshl_n_u16(__p0, __p1) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 17); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshl_n_s8(__p0, __p1) __extension__ ({ \
- int8x8_t __s0 = __p0; \
- int8x8_t __ret; \
- __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 0); \
- __ret; \
- })
- #else
- #define vqshl_n_s8(__p0, __p1) __extension__ ({ \
- int8x8_t __s0 = __p0; \
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x8_t __ret; \
- __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 0); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshl_n_s32(__p0, __p1) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 2); \
- __ret; \
- })
- #else
- #define vqshl_n_s32(__p0, __p1) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 2); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshl_n_s64(__p0, __p1) __extension__ ({ \
- int64x1_t __s0 = __p0; \
- int64x1_t __ret; \
- __ret = (int64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 3); \
- __ret; \
- })
- #else
- #define vqshl_n_s64(__p0, __p1) __extension__ ({ \
- int64x1_t __s0 = __p0; \
- int64x1_t __ret; \
- __ret = (int64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 3); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshl_n_s16(__p0, __p1) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 1); \
- __ret; \
- })
- #else
- #define vqshl_n_s16(__p0, __p1) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 1); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
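The vqshl(q)_n forms are the immediate-shift counterparts of the register-shift vqshl above: every lane is shifted left by the same constant and saturated to its element type. A usage sketch (demo is a hypothetical wrapper):

#include <arm_neon.h>

void demo(void) {
    uint16_t a[4] = {1, 30000, 40000, 65535};
    uint16_t d[4];
    vst1_u16(d, vqshl_n_u16(vld1_u16(a), 2));   /* x << 2, saturated to 0..65535 */
    /* expected: 4, 65535 (120000 saturates), 65535, 65535 */
}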
- #ifdef __LITTLE_ENDIAN__
- #define vqshluq_n_s8(__p0, __p1) __extension__ ({ \
- int8x16_t __s0 = __p0; \
- uint8x16_t __ret; \
- __ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 48); \
- __ret; \
- })
- #else
- #define vqshluq_n_s8(__p0, __p1) __extension__ ({ \
- int8x16_t __s0 = __p0; \
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x16_t __ret; \
- __ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 48); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshluq_n_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- uint32x4_t __ret; \
- __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 50); \
- __ret; \
- })
- #else
- #define vqshluq_n_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint32x4_t __ret; \
- __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 50); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshluq_n_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- uint64x2_t __ret; \
- __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 51); \
- __ret; \
- })
- #else
- #define vqshluq_n_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint64x2_t __ret; \
- __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 51); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshluq_n_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- uint16x8_t __ret; \
- __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 49); \
- __ret; \
- })
- #else
- #define vqshluq_n_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __ret; \
- __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 49); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshlu_n_s8(__p0, __p1) __extension__ ({ \
- int8x8_t __s0 = __p0; \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 16); \
- __ret; \
- })
- #else
- #define vqshlu_n_s8(__p0, __p1) __extension__ ({ \
- int8x8_t __s0 = __p0; \
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 16); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshlu_n_s32(__p0, __p1) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 18); \
- __ret; \
- })
- #else
- #define vqshlu_n_s32(__p0, __p1) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 18); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshlu_n_s64(__p0, __p1) __extension__ ({ \
- int64x1_t __s0 = __p0; \
- uint64x1_t __ret; \
- __ret = (uint64x1_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 19); \
- __ret; \
- })
- #else
- #define vqshlu_n_s64(__p0, __p1) __extension__ ({ \
- int64x1_t __s0 = __p0; \
- uint64x1_t __ret; \
- __ret = (uint64x1_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 19); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshlu_n_s16(__p0, __p1) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 17); \
- __ret; \
- })
- #else
- #define vqshlu_n_s16(__p0, __p1) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 17); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
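vqshlu(q)_n is the signed-in, unsigned-out variant: a signed lane is shifted left by a constant and saturated into the unsigned range, so negative inputs clamp to 0. A usage sketch (demo is a hypothetical wrapper):

#include <arm_neon.h>

void demo(void) {
    int8_t  src[8] = {1, 2, 3, 100, -1, -50, 127, 0};
    uint8_t dst[8];
    vst1_u8(dst, vqshlu_n_s8(vld1_s8(src), 2));  /* x << 2, saturated to 0..255 */
    /* expected: 4, 8, 12, 255, 0, 0, 255, 0 */
}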
- #ifdef __LITTLE_ENDIAN__
- #define vqshrn_n_u32(__p0, __p1) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \
- __ret; \
- })
- #else
- #define vqshrn_n_u32(__p0, __p1) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 17); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vqshrn_n_u32(__p0, __p1) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshrn_n_u64(__p0, __p1) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \
- __ret; \
- })
- #else
- #define vqshrn_n_u64(__p0, __p1) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 18); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #define __noswap_vqshrn_n_u64(__p0, __p1) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshrn_n_u16(__p0, __p1) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \
- __ret; \
- })
- #else
- #define vqshrn_n_u16(__p0, __p1) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 16); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vqshrn_n_u16(__p0, __p1) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshrn_n_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \
- __ret; \
- })
- #else
- #define vqshrn_n_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 1); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vqshrn_n_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshrn_n_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \
- __ret; \
- })
- #else
- #define vqshrn_n_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 2); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #define __noswap_vqshrn_n_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshrn_n_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int8x8_t __ret; \
- __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \
- __ret; \
- })
- #else
- #define vqshrn_n_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x8_t __ret; \
- __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 0); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vqshrn_n_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int8x8_t __ret; \
- __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshrun_n_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \
- __ret; \
- })
- #else
- #define vqshrun_n_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 17); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vqshrun_n_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshrun_n_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \
- __ret; \
- })
- #else
- #define vqshrun_n_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 18); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #define __noswap_vqshrun_n_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshrun_n_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \
- __ret; \
- })
- #else
- #define vqshrun_n_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 16); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vqshrun_n_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \
- __ret; \
- })
- #endif
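vqshrn_n/vqshrun_n are the truncating counterparts of the rounding vqrshrn_n/vqrshrun_n earlier in the listing: the shift simply drops the low bits instead of adding the rounding constant first. For example, with a source lane of 1000 and a shift of 4, vqshrn_n_s32 yields 1000 >> 4 = 62, while vqrshrn_n_s32 yields (1000 + 8) >> 4 = 63.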
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
- return __ret;
- }
- #else
- __ai uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
- return __ret;
- }
- #else
- __ai int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai int32x4_t __noswap_vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
- return __ret;
- }
- #else
- __ai int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
- return __ret;
- }
- #else
- __ai int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai int16x8_t __noswap_vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vqsub_u64(uint64x1_t __p0, uint64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
- return __ret;
- }
- #else
- __ai uint64x1_t vqsub_u64(uint64x1_t __p0, uint64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai int32x2_t __noswap_vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vqsub_s64(int64x1_t __p0, int64x1_t __p1) {
- int64x1_t __ret;
- __ret = (int64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
- return __ret;
- }
- #else
- __ai int64x1_t vqsub_s64(int64x1_t __p0, int64x1_t __p1) {
- int64x1_t __ret;
- __ret = (int64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
- return __ret;
- }
- #else
- __ai int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai int16x4_t __noswap_vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
- return __ret;
- }
- #endif
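- /* [Editor's illustration, not part of the generated header] The vqsub/vqsubq
-  * family above performs lane-wise saturating subtraction: unsigned results
-  * clamp at 0 and signed results clamp at the type's minimum/maximum instead
-  * of wrapping. A minimal usage sketch, assuming the definitions above are in
-  * scope; the helper name is hypothetical. */
- __ai uint8x8_t example_vqsub_u8(uint8x8_t a, uint8x8_t b) {
- /* e.g. 5 - 10 yields 0 rather than wrapping to 251 */
- return vqsub_u8(a, b);
- }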
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai uint16x4_t __noswap_vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai uint32x2_t __noswap_vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai uint8x8_t __noswap_vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
- return __ret;
- }
- #else
- __ai int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai int16x4_t __noswap_vraddhn_s32(int32x4_t __p0, int32x4_t __p1) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai int32x2_t __noswap_vraddhn_s64(int64x2_t __p0, int64x2_t __p1) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai int8x8_t __noswap_vraddhn_s16(int16x8_t __p0, int16x8_t __p1) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
- return __ret;
- }
- #endif
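- /* [Editor's illustration, not part of the generated header] The vraddhn
-  * intrinsics above add two wide vectors and return the rounded high half of
-  * each lane, narrowing the element type (e.g. 32-bit sums to 16-bit results).
-  * A minimal sketch assuming the definitions above; the helper name is
-  * hypothetical. */
- __ai uint16x4_t example_vraddhn_u32(uint32x4_t a, uint32x4_t b) {
- /* each lane becomes the top 16 bits of (a + b + 0x8000) */
- return vraddhn_u32(a, b);
- }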
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vrecpeq_u32(uint32x4_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vrecpeq_u32(uint32x4_t __p0) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vrecpeq_f32(float32x4_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 41);
- return __ret;
- }
- #else
- __ai float32x4_t vrecpeq_f32(float32x4_t __p0) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 41);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vrecpe_u32(uint32x2_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vrecpe_u32(uint32x2_t __p0) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vrecpe_f32(float32x2_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 9);
- return __ret;
- }
- #else
- __ai float32x2_t vrecpe_f32(float32x2_t __p0) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 9);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
- return __ret;
- }
- #else
- __ai float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
- return __ret;
- }
- #else
- __ai float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
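- /* [Editor's illustration, not part of the generated header] vrecpe produces
-  * a low-precision reciprocal estimate and vrecps computes (2 - a*b), the
-  * Newton-Raphson correction factor, so the two are normally combined. A
-  * minimal sketch assuming vmulq_f32 from earlier in this header; the helper
-  * name is hypothetical. */
- __ai float32x4_t example_reciprocal_f32(float32x4_t d) {
- float32x4_t r = vrecpeq_f32(d);      /* initial estimate of 1/d */
- r = vmulq_f32(r, vrecpsq_f32(d, r)); /* one refinement step */
- r = vmulq_f32(r, vrecpsq_f32(d, r)); /* second step for near-full precision */
- return r;
- }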
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vrev16_p8(poly8x8_t __p0) {
- poly8x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
- return __ret;
- }
- #else
- __ai poly8x8_t vrev16_p8(poly8x8_t __p0) {
- poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vrev16q_p8(poly8x16_t __p0) {
- poly8x16_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
- return __ret;
- }
- #else
- __ai poly8x16_t vrev16q_p8(poly8x16_t __p0) {
- poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vrev16q_u8(uint8x16_t __p0) {
- uint8x16_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
- return __ret;
- }
- #else
- __ai uint8x16_t vrev16q_u8(uint8x16_t __p0) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vrev16q_s8(int8x16_t __p0) {
- int8x16_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
- return __ret;
- }
- #else
- __ai int8x16_t vrev16q_s8(int8x16_t __p0) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vrev16_u8(uint8x8_t __p0) {
- uint8x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
- return __ret;
- }
- #else
- __ai uint8x8_t vrev16_u8(uint8x8_t __p0) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vrev16_s8(int8x8_t __p0) {
- int8x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
- return __ret;
- }
- #else
- __ai int8x8_t vrev16_s8(int8x8_t __p0) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vrev32_p8(poly8x8_t __p0) {
- poly8x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
- return __ret;
- }
- #else
- __ai poly8x8_t vrev32_p8(poly8x8_t __p0) {
- poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4_t vrev32_p16(poly16x4_t __p0) {
- poly16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
- return __ret;
- }
- #else
- __ai poly16x4_t vrev32_p16(poly16x4_t __p0) {
- poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- poly16x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vrev32q_p8(poly8x16_t __p0) {
- poly8x16_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
- return __ret;
- }
- #else
- __ai poly8x16_t vrev32q_p8(poly8x16_t __p0) {
- poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8_t vrev32q_p16(poly16x8_t __p0) {
- poly16x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
- return __ret;
- }
- #else
- __ai poly16x8_t vrev32q_p16(poly16x8_t __p0) {
- poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly16x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vrev32q_u8(uint8x16_t __p0) {
- uint8x16_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
- return __ret;
- }
- #else
- __ai uint8x16_t vrev32q_u8(uint8x16_t __p0) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vrev32q_u16(uint16x8_t __p0) {
- uint16x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
- return __ret;
- }
- #else
- __ai uint16x8_t vrev32q_u16(uint16x8_t __p0) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vrev32q_s8(int8x16_t __p0) {
- int8x16_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
- return __ret;
- }
- #else
- __ai int8x16_t vrev32q_s8(int8x16_t __p0) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vrev32q_s16(int16x8_t __p0) {
- int16x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
- return __ret;
- }
- #else
- __ai int16x8_t vrev32q_s16(int16x8_t __p0) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vrev32_u8(uint8x8_t __p0) {
- uint8x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
- return __ret;
- }
- #else
- __ai uint8x8_t vrev32_u8(uint8x8_t __p0) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vrev32_u16(uint16x4_t __p0) {
- uint16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
- return __ret;
- }
- #else
- __ai uint16x4_t vrev32_u16(uint16x4_t __p0) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vrev32_s8(int8x8_t __p0) {
- int8x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
- return __ret;
- }
- #else
- __ai int8x8_t vrev32_s8(int8x8_t __p0) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vrev32_s16(int16x4_t __p0) {
- int16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
- return __ret;
- }
- #else
- __ai int16x4_t vrev32_s16(int16x4_t __p0) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vrev64_p8(poly8x8_t __p0) {
- poly8x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #else
- __ai poly8x8_t vrev64_p8(poly8x8_t __p0) {
- poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4_t vrev64_p16(poly16x4_t __p0) {
- poly16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- return __ret;
- }
- #else
- __ai poly16x4_t vrev64_p16(poly16x4_t __p0) {
- poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- poly16x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vrev64q_p8(poly8x16_t __p0) {
- poly8x16_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
- return __ret;
- }
- #else
- __ai poly8x16_t vrev64q_p8(poly8x16_t __p0) {
- poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8_t vrev64q_p16(poly16x8_t __p0) {
- poly16x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
- return __ret;
- }
- #else
- __ai poly16x8_t vrev64q_p16(poly16x8_t __p0) {
- poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly16x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vrev64q_u8(uint8x16_t __p0) {
- uint8x16_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
- return __ret;
- }
- #else
- __ai uint8x16_t vrev64q_u8(uint8x16_t __p0) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vrev64q_u32(uint32x4_t __p0) {
- uint32x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
- return __ret;
- }
- #else
- __ai uint32x4_t vrev64q_u32(uint32x4_t __p0) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vrev64q_u16(uint16x8_t __p0) {
- uint16x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
- return __ret;
- }
- #else
- __ai uint16x8_t vrev64q_u16(uint16x8_t __p0) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vrev64q_s8(int8x16_t __p0) {
- int8x16_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
- return __ret;
- }
- #else
- __ai int8x16_t vrev64q_s8(int8x16_t __p0) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vrev64q_f32(float32x4_t __p0) {
- float32x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
- return __ret;
- }
- #else
- __ai float32x4_t vrev64q_f32(float32x4_t __p0) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vrev64q_s32(int32x4_t __p0) {
- int32x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
- return __ret;
- }
- #else
- __ai int32x4_t vrev64q_s32(int32x4_t __p0) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vrev64q_s16(int16x8_t __p0) {
- int16x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
- return __ret;
- }
- #else
- __ai int16x8_t vrev64q_s16(int16x8_t __p0) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vrev64_u8(uint8x8_t __p0) {
- uint8x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #else
- __ai uint8x8_t vrev64_u8(uint8x8_t __p0) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vrev64_u32(uint32x2_t __p0) {
- uint32x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 1, 0);
- return __ret;
- }
- #else
- __ai uint32x2_t vrev64_u32(uint32x2_t __p0) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vrev64_u16(uint16x4_t __p0) {
- uint16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- return __ret;
- }
- #else
- __ai uint16x4_t vrev64_u16(uint16x4_t __p0) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vrev64_s8(int8x8_t __p0) {
- int8x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vrev64_s8(int8x8_t __p0) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vrev64_f32(float32x2_t __p0) {
- float32x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 1, 0);
- return __ret;
- }
- #else
- __ai float32x2_t vrev64_f32(float32x2_t __p0) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vrev64_s32(int32x2_t __p0) {
- int32x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 1, 0);
- return __ret;
- }
- #else
- __ai int32x2_t vrev64_s32(int32x2_t __p0) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vrev64_s16(int16x4_t __p0) {
- int16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- return __ret;
- }
- #else
- __ai int16x4_t vrev64_s16(int16x4_t __p0) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
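- /* [Editor's illustration, not part of the generated header] vrev16, vrev32
-  * and vrev64 reverse the order of elements within each 16-, 32- or 64-bit
-  * group, which is the usual building block for lane-wise byte swaps. A
-  * minimal sketch assuming the definitions above; the helper name is
-  * hypothetical. */
- __ai uint8x16_t example_bswap32_u8(uint8x16_t bytes) {
- /* reverse the four bytes inside every 32-bit word: an endianness swap */
- return vrev32q_u8(bytes);
- }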
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
- return __ret;
- }
- #else
- __ai int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
- return __ret;
- }
- #else
- __ai int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
- return __ret;
- }
- #else
- __ai int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
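- /* [Editor's illustration, not part of the generated header] vrhadd/vrhaddq
-  * compute the rounding halving add (a + b + 1) >> 1 per lane without losing
-  * the carry to intermediate overflow, a common pixel-averaging primitive. A
-  * minimal sketch; the helper name is hypothetical. */
- __ai uint8x8_t example_average_u8(uint8x8_t a, uint8x8_t b) {
- /* e.g. 255 and 254 average to 255; the 8-bit sum never overflows */
- return vrhadd_u8(a, b);
- }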
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
- return __ret;
- }
- #else
- __ai uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
- return __ret;
- }
- #else
- __ai int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
- return __ret;
- }
- #else
- __ai int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
- return __ret;
- }
- #else
- __ai int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vrshl_u64(uint64x1_t __p0, int64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
- return __ret;
- }
- #else
- __ai uint64x1_t vrshl_u64(uint64x1_t __p0, int64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vrshl_s64(int64x1_t __p0, int64x1_t __p1) {
- int64x1_t __ret;
- __ret = (int64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
- return __ret;
- }
- #else
- __ai int64x1_t vrshl_s64(int64x1_t __p0, int64x1_t __p1) {
- int64x1_t __ret;
- __ret = (int64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
- return __ret;
- }
- #else
- __ai int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
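- /* [Editor's illustration, not part of the generated header] vrshl/vrshlq
-  * shift each lane left by a signed per-lane count, and a negative count is a
-  * rounding shift right. A minimal sketch assuming vdup_n_s32 from earlier in
-  * this header; the helper name is hypothetical. */
- __ai int32x2_t example_rounding_shift_right2(int32x2_t v) {
- /* each lane becomes (v + 2) >> 2, i.e. a divide by 4 with rounding */
- return vrshl_s32(v, vdup_n_s32(-2));
- }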
- #ifdef __LITTLE_ENDIAN__
- #define vrshrq_n_u8(__p0, __p1) __extension__ ({ \
- uint8x16_t __s0 = __p0; \
- uint8x16_t __ret; \
- __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 48); \
- __ret; \
- })
- #else
- #define vrshrq_n_u8(__p0, __p1) __extension__ ({ \
- uint8x16_t __s0 = __p0; \
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x16_t __ret; \
- __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 48); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrshrq_n_u32(__p0, __p1) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __ret; \
- __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 50); \
- __ret; \
- })
- #else
- #define vrshrq_n_u32(__p0, __p1) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint32x4_t __ret; \
- __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 50); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrshrq_n_u64(__p0, __p1) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint64x2_t __ret; \
- __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 51); \
- __ret; \
- })
- #else
- #define vrshrq_n_u64(__p0, __p1) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint64x2_t __ret; \
- __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 51); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrshrq_n_u16(__p0, __p1) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __ret; \
- __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 49); \
- __ret; \
- })
- #else
- #define vrshrq_n_u16(__p0, __p1) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __ret; \
- __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 49); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrshrq_n_s8(__p0, __p1) __extension__ ({ \
- int8x16_t __s0 = __p0; \
- int8x16_t __ret; \
- __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 32); \
- __ret; \
- })
- #else
- #define vrshrq_n_s8(__p0, __p1) __extension__ ({ \
- int8x16_t __s0 = __p0; \
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x16_t __ret; \
- __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 32); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrshrq_n_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __ret; \
- __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 34); \
- __ret; \
- })
- #else
- #define vrshrq_n_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 34); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrshrq_n_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int64x2_t __ret; \
- __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 35); \
- __ret; \
- })
- #else
- #define vrshrq_n_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int64x2_t __ret; \
- __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 35); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrshrq_n_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __ret; \
- __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 33); \
- __ret; \
- })
- #else
- #define vrshrq_n_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __ret; \
- __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 33); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrshr_n_u8(__p0, __p1) __extension__ ({ \
- uint8x8_t __s0 = __p0; \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 16); \
- __ret; \
- })
- #else
- #define vrshr_n_u8(__p0, __p1) __extension__ ({ \
- uint8x8_t __s0 = __p0; \
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 16); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrshr_n_u32(__p0, __p1) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 18); \
- __ret; \
- })
- #else
- #define vrshr_n_u32(__p0, __p1) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 18); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrshr_n_u64(__p0, __p1) __extension__ ({ \
- uint64x1_t __s0 = __p0; \
- uint64x1_t __ret; \
- __ret = (uint64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 19); \
- __ret; \
- })
- #else
- #define vrshr_n_u64(__p0, __p1) __extension__ ({ \
- uint64x1_t __s0 = __p0; \
- uint64x1_t __ret; \
- __ret = (uint64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 19); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrshr_n_u16(__p0, __p1) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 17); \
- __ret; \
- })
- #else
- #define vrshr_n_u16(__p0, __p1) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 17); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrshr_n_s8(__p0, __p1) __extension__ ({ \
- int8x8_t __s0 = __p0; \
- int8x8_t __ret; \
- __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 0); \
- __ret; \
- })
- #else
- #define vrshr_n_s8(__p0, __p1) __extension__ ({ \
- int8x8_t __s0 = __p0; \
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x8_t __ret; \
- __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 0); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrshr_n_s32(__p0, __p1) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 2); \
- __ret; \
- })
- #else
- #define vrshr_n_s32(__p0, __p1) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 2); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrshr_n_s64(__p0, __p1) __extension__ ({ \
- int64x1_t __s0 = __p0; \
- int64x1_t __ret; \
- __ret = (int64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 3); \
- __ret; \
- })
- #else
- #define vrshr_n_s64(__p0, __p1) __extension__ ({ \
- int64x1_t __s0 = __p0; \
- int64x1_t __ret; \
- __ret = (int64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 3); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrshr_n_s16(__p0, __p1) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 1); \
- __ret; \
- })
- #else
- #define vrshr_n_s16(__p0, __p1) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 1); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
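The vrshr_n/vrshrq_n intrinsics defined above perform a rounding shift right by an immediate: each lane becomes (x + (1 << (n-1))) >> n, with n a compile-time constant no larger than the element width. A minimal usage sketch (the helper name and shift amount are illustrative, not from the header):

#include <arm_neon.h>

/* Divide each 16-bit accumulator lane by 16 with round-to-nearest,
 * i.e. (x + 8) >> 4 per lane. */
uint16x8_t div16_round(uint16x8_t acc)
{
    return vrshrq_n_u16(acc, 4);
}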
- #ifdef __LITTLE_ENDIAN__
- #define vrshrn_n_u32(__p0, __p1) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \
- __ret; \
- })
- #else
- #define vrshrn_n_u32(__p0, __p1) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 17); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vrshrn_n_u32(__p0, __p1) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrshrn_n_u64(__p0, __p1) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \
- __ret; \
- })
- #else
- #define vrshrn_n_u64(__p0, __p1) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 18); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #define __noswap_vrshrn_n_u64(__p0, __p1) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrshrn_n_u16(__p0, __p1) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \
- __ret; \
- })
- #else
- #define vrshrn_n_u16(__p0, __p1) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 16); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vrshrn_n_u16(__p0, __p1) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrshrn_n_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \
- __ret; \
- })
- #else
- #define vrshrn_n_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 1); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vrshrn_n_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrshrn_n_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \
- __ret; \
- })
- #else
- #define vrshrn_n_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 2); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #define __noswap_vrshrn_n_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrshrn_n_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int8x8_t __ret; \
- __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \
- __ret; \
- })
- #else
- #define vrshrn_n_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x8_t __ret; \
- __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 0); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vrshrn_n_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int8x8_t __ret; \
- __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \
- __ret; \
- })
- #endif
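The vrshrn_n intrinsics above combine the same rounding right shift with a narrowing to half the element width. A minimal sketch, assuming 16-bit lanes holding 8 fractional bits (helper name illustrative):

#include <arm_neon.h>

/* Convert Q8 fixed-point lanes back to 8-bit integers with rounding:
 * (x + 128) >> 8 per lane, then narrow uint16x8_t -> uint8x8_t. */
uint8x8_t q8_to_u8(uint16x8_t fixed_point)
{
    return vrshrn_n_u16(fixed_point, 8);
}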
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vrsqrteq_u32(uint32x4_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vrsqrteq_u32(uint32x4_t __p0) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vrsqrteq_f32(float32x4_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 41);
- return __ret;
- }
- #else
- __ai float32x4_t vrsqrteq_f32(float32x4_t __p0) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 41);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vrsqrte_u32(uint32x2_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vrsqrte_u32(uint32x2_t __p0) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vrsqrte_f32(float32x2_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 9);
- return __ret;
- }
- #else
- __ai float32x2_t vrsqrte_f32(float32x2_t __p0) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 9);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
- return __ret;
- }
- #else
- __ai float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
- return __ret;
- }
- #else
- __ai float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
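vrsqrte gives a coarse per-lane estimate of 1/sqrt(x), and vrsqrts(a, b) computes (3 - a*b)/2, the Newton-Raphson step used to refine it. A minimal sketch of the usual pairing (helper name and the choice of two refinement steps are illustrative):

#include <arm_neon.h>

/* Approximate 1/sqrt(x) per lane: start from the hardware estimate and
 * apply two Newton-Raphson refinements via vrsqrtsq_f32. */
float32x4_t rsqrt_f32(float32x4_t x)
{
    float32x4_t e = vrsqrteq_f32(x);
    e = vmulq_f32(e, vrsqrtsq_f32(vmulq_f32(x, e), e));
    e = vmulq_f32(e, vrsqrtsq_f32(vmulq_f32(x, e), e));
    return e;
}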
- #ifdef __LITTLE_ENDIAN__
- #define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x16_t __s0 = __p0; \
- uint8x16_t __s1 = __p1; \
- uint8x16_t __ret; \
- __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
- __ret; \
- })
- #else
- #define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x16_t __s0 = __p0; \
- uint8x16_t __s1 = __p1; \
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x16_t __ret; \
- __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __s1 = __p1; \
- uint32x4_t __ret; \
- __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
- __ret; \
- })
- #else
- #define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __s1 = __p1; \
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- uint32x4_t __ret; \
- __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint64x2_t __s1 = __p1; \
- uint64x2_t __ret; \
- __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
- __ret; \
- })
- #else
- #define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint64x2_t __s1 = __p1; \
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- uint64x2_t __ret; \
- __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __s1 = __p1; \
- uint16x8_t __ret; \
- __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
- __ret; \
- })
- #else
- #define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __s1 = __p1; \
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __ret; \
- __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x16_t __s0 = __p0; \
- int8x16_t __s1 = __p1; \
- int8x16_t __ret; \
- __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
- __ret; \
- })
- #else
- #define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x16_t __s0 = __p0; \
- int8x16_t __s1 = __p1; \
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x16_t __ret; \
- __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x4_t __ret; \
- __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
- __ret; \
- })
- #else
- #define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int64x2_t __s1 = __p1; \
- int64x2_t __ret; \
- __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
- __ret; \
- })
- #else
- #define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int64x2_t __s1 = __p1; \
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int64x2_t __ret; \
- __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x8_t __ret; \
- __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
- __ret; \
- })
- #else
- #define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __ret; \
- __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x8_t __s0 = __p0; \
- uint8x8_t __s1 = __p1; \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
- __ret; \
- })
- #else
- #define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x8_t __s0 = __p0; \
- uint8x8_t __s1 = __p1; \
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x2_t __s1 = __p1; \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
- __ret; \
- })
- #else
- #define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x2_t __s1 = __p1; \
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x1_t __s0 = __p0; \
- uint64x1_t __s1 = __p1; \
- uint64x1_t __ret; \
- __ret = (uint64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
- __ret; \
- })
- #else
- #define vrsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x1_t __s0 = __p0; \
- uint64x1_t __s1 = __p1; \
- uint64x1_t __ret; \
- __ret = (uint64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x4_t __s1 = __p1; \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
- __ret; \
- })
- #else
- #define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x4_t __s1 = __p1; \
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x8_t __s0 = __p0; \
- int8x8_t __s1 = __p1; \
- int8x8_t __ret; \
- __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
- __ret; \
- })
- #else
- #define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x8_t __s0 = __p0; \
- int8x8_t __s1 = __p1; \
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x8_t __ret; \
- __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
- __ret; \
- })
- #else
- #define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x1_t __s0 = __p0; \
- int64x1_t __s1 = __p1; \
- int64x1_t __ret; \
- __ret = (int64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
- __ret; \
- })
- #else
- #define vrsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x1_t __s0 = __p0; \
- int64x1_t __s1 = __p1; \
- int64x1_t __ret; \
- __ret = (int64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
- __ret; \
- })
- #else
- #define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
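vrsra_n/vrsraq_n fold the rounding right shift into an accumulation: the second operand is shifted with rounding and added to the first. A minimal sketch (helper name and shift amount illustrative):

#include <arm_neon.h>

/* acc += (val + 2) >> 2 per 32-bit lane, i.e. accumulate val/4
 * rounded to nearest. */
uint32x4_t accumulate_quarter(uint32x4_t acc, uint32x4_t val)
{
    return vrsraq_n_u32(acc, val, 2);
}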
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai uint16x4_t __noswap_vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai uint32x2_t __noswap_vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai uint8x8_t __noswap_vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
- return __ret;
- }
- #else
- __ai int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai int16x4_t __noswap_vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai int32x2_t __noswap_vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai int8x8_t __noswap_vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
- return __ret;
- }
- #endif
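vrsubhn subtracts two full-width vectors and returns the rounded high half of each difference at half the width; for 16-bit inputs each lane is (a - b + 0x80) >> 8, narrowed to 8 bits. A minimal sketch (helper name illustrative):

#include <arm_neon.h>

/* Rounded high byte of each 16-bit difference, narrowed to uint8x8_t. */
uint8x8_t diff_high_byte(uint16x8_t a, uint16x8_t b)
{
    return vrsubhn_u16(a, b);
}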
- #ifdef __LITTLE_ENDIAN__
- #define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8_t __s0 = __p0; \
- poly8x8_t __s1 = __p1; \
- poly8x8_t __ret; \
- __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
- __ret; \
- })
- #else
- #define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8_t __s0 = __p0; \
- poly8x8_t __s1 = __p1; \
- poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly8x8_t __ret; \
- __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8_t __s0 = __p0; \
- poly8x8_t __s1 = __p1; \
- poly8x8_t __ret; \
- __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16_t __s0 = __p0; \
- poly16x4_t __s1 = __p1; \
- poly16x4_t __ret; \
- __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
- __ret; \
- })
- #else
- #define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16_t __s0 = __p0; \
- poly16x4_t __s1 = __p1; \
- poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- poly16x4_t __ret; \
- __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__rev1, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16_t __s0 = __p0; \
- poly16x4_t __s1 = __p1; \
- poly16x4_t __ret; \
- __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8_t __s0 = __p0; \
- poly8x16_t __s1 = __p1; \
- poly8x16_t __ret; \
- __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
- __ret; \
- })
- #else
- #define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8_t __s0 = __p0; \
- poly8x16_t __s1 = __p1; \
- poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly8x16_t __ret; \
- __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8_t __s0 = __p0; \
- poly8x16_t __s1 = __p1; \
- poly8x16_t __ret; \
- __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16_t __s0 = __p0; \
- poly16x8_t __s1 = __p1; \
- poly16x8_t __ret; \
- __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
- __ret; \
- })
- #else
- #define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16_t __s0 = __p0; \
- poly16x8_t __s1 = __p1; \
- poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly16x8_t __ret; \
- __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__rev1, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16_t __s0 = __p0; \
- poly16x8_t __s1 = __p1; \
- poly16x8_t __ret; \
- __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8_t __s0 = __p0; \
- uint8x16_t __s1 = __p1; \
- uint8x16_t __ret; \
- __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
- __ret; \
- })
- #else
- #define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8_t __s0 = __p0; \
- uint8x16_t __s1 = __p1; \
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x16_t __ret; \
- __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8_t __s0 = __p0; \
- uint8x16_t __s1 = __p1; \
- uint8x16_t __ret; \
- __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32_t __s0 = __p0; \
- uint32x4_t __s1 = __p1; \
- uint32x4_t __ret; \
- __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \
- __ret; \
- })
- #else
- #define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32_t __s0 = __p0; \
- uint32x4_t __s1 = __p1; \
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- uint32x4_t __ret; \
- __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__rev1, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32_t __s0 = __p0; \
- uint32x4_t __s1 = __p1; \
- uint32x4_t __ret; \
- __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64_t __s0 = __p0; \
- uint64x2_t __s1 = __p1; \
- uint64x2_t __ret; \
- __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
- __ret; \
- })
- #else
- #define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64_t __s0 = __p0; \
- uint64x2_t __s1 = __p1; \
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- uint64x2_t __ret; \
- __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__rev1, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #define __noswap_vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64_t __s0 = __p0; \
- uint64x2_t __s1 = __p1; \
- uint64x2_t __ret; \
- __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16_t __s0 = __p0; \
- uint16x8_t __s1 = __p1; \
- uint16x8_t __ret; \
- __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
- __ret; \
- })
- #else
- #define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16_t __s0 = __p0; \
- uint16x8_t __s1 = __p1; \
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __ret; \
- __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__rev1, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16_t __s0 = __p0; \
- uint16x8_t __s1 = __p1; \
- uint16x8_t __ret; \
- __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
- int8_t __s0 = __p0; \
- int8x16_t __s1 = __p1; \
- int8x16_t __ret; \
- __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
- __ret; \
- })
- #else
- #define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
- int8_t __s0 = __p0; \
- int8x16_t __s1 = __p1; \
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x16_t __ret; \
- __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
- int8_t __s0 = __p0; \
- int8x16_t __s1 = __p1; \
- int8x16_t __ret; \
- __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32_t __s0 = __p0; \
- float32x4_t __s1 = __p1; \
- float32x4_t __ret; \
- __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (int8x16_t)__s1, __p2); \
- __ret; \
- })
- #else
- #define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32_t __s0 = __p0; \
- float32x4_t __s1 = __p1; \
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- float32x4_t __ret; \
- __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (int8x16_t)__rev1, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32_t __s0 = __p0; \
- float32x4_t __s1 = __p1; \
- float32x4_t __ret; \
- __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (int8x16_t)__s1, __p2); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x4_t __ret; \
- __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \
- __ret; \
- })
- #else
- #define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__rev1, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x4_t __ret; \
- __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int8x16_t)__s1, __p2); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
- int64_t __s0 = __p0; \
- int64x2_t __s1 = __p1; \
- int64x2_t __ret; \
- __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
- __ret; \
- })
- #else
- #define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
- int64_t __s0 = __p0; \
- int64x2_t __s1 = __p1; \
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int64x2_t __ret; \
- __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__rev1, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #define __noswap_vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
- int64_t __s0 = __p0; \
- int64x2_t __s1 = __p1; \
- int64x2_t __ret; \
- __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x8_t __ret; \
- __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
- __ret; \
- })
- #else
- #define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __ret; \
- __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__rev1, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x8_t __ret; \
- __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int8x16_t)__s1, __p2); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8_t __s0 = __p0; \
- uint8x8_t __s1 = __p1; \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
- __ret; \
- })
- #else
- #define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8_t __s0 = __p0; \
- uint8x8_t __s1 = __p1; \
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8_t __s0 = __p0; \
- uint8x8_t __s1 = __p1; \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32_t __s0 = __p0; \
- uint32x2_t __s1 = __p1; \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \
- __ret; \
- })
- #else
- #define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32_t __s0 = __p0; \
- uint32x2_t __s1 = __p1; \
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__rev1, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #define __noswap_vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32_t __s0 = __p0; \
- uint32x2_t __s1 = __p1; \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vset_lane_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64_t __s0 = __p0; \
- uint64x1_t __s1 = __p1; \
- uint64x1_t __ret; \
- __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
- __ret; \
- })
- #else
- #define vset_lane_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64_t __s0 = __p0; \
- uint64x1_t __s1 = __p1; \
- uint64x1_t __ret; \
- __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
- __ret; \
- })
- #define __noswap_vset_lane_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64_t __s0 = __p0; \
- uint64x1_t __s1 = __p1; \
- uint64x1_t __ret; \
- __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16_t __s0 = __p0; \
- uint16x4_t __s1 = __p1; \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
- __ret; \
- })
- #else
- #define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16_t __s0 = __p0; \
- uint16x4_t __s1 = __p1; \
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__rev1, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16_t __s0 = __p0; \
- uint16x4_t __s1 = __p1; \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
- int8_t __s0 = __p0; \
- int8x8_t __s1 = __p1; \
- int8x8_t __ret; \
- __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
- __ret; \
- })
- #else
- #define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
- int8_t __s0 = __p0; \
- int8x8_t __s1 = __p1; \
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x8_t __ret; \
- __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
- int8_t __s0 = __p0; \
- int8x8_t __s1 = __p1; \
- int8x8_t __ret; \
- __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32_t __s0 = __p0; \
- float32x2_t __s1 = __p1; \
- float32x2_t __ret; \
- __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (int8x8_t)__s1, __p2); \
- __ret; \
- })
- #else
- #define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32_t __s0 = __p0; \
- float32x2_t __s1 = __p1; \
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- float32x2_t __ret; \
- __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (int8x8_t)__rev1, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #define __noswap_vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32_t __s0 = __p0; \
- float32x2_t __s1 = __p1; \
- float32x2_t __ret; \
- __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (int8x8_t)__s1, __p2); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \
- __ret; \
- })
- #else
- #define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__rev1, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #define __noswap_vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int8x8_t)__s1, __p2); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vset_lane_s64(__p0, __p1, __p2) __extension__ ({ \
- int64_t __s0 = __p0; \
- int64x1_t __s1 = __p1; \
- int64x1_t __ret; \
- __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
- __ret; \
- })
- #else
- #define vset_lane_s64(__p0, __p1, __p2) __extension__ ({ \
- int64_t __s0 = __p0; \
- int64x1_t __s1 = __p1; \
- int64x1_t __ret; \
- __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
- __ret; \
- })
- #define __noswap_vset_lane_s64(__p0, __p1, __p2) __extension__ ({ \
- int64_t __s0 = __p0; \
- int64x1_t __s1 = __p1; \
- int64x1_t __ret; \
- __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
- __ret; \
- })
- #else
- #define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__rev1, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int8x8_t)__s1, __p2); \
- __ret; \
- })
- #endif
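vset_lane/vsetq_lane replace a single lane of a vector with a scalar; the lane index must be a compile-time constant within the vector's range. A minimal sketch (helper name and lane index illustrative):

#include <arm_neon.h>

/* Overwrite lane 2 of a float32x4_t; valid lane indices are 0..3. */
float32x4_t set_third_lane(float32x4_t v, float32_t x)
{
    return vsetq_lane_f32(x, v, 2);
}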
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
- return __ret;
- }
- #else
- __ai uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
- return __ret;
- }
- #else
- __ai int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
- return __ret;
- }
- #else
- __ai int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
- return __ret;
- }
- #else
- __ai int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vshl_u64(uint64x1_t __p0, int64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
- return __ret;
- }
- #else
- __ai uint64x1_t vshl_u64(uint64x1_t __p0, int64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vshl_s64(int64x1_t __p0, int64x1_t __p1) {
- int64x1_t __ret;
- __ret = (int64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
- return __ret;
- }
- #else
- __ai int64x1_t vshl_s64(int64x1_t __p0, int64x1_t __p1) {
- int64x1_t __ret;
- __ret = (int64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
- return __ret;
- }
- #else
- __ai int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
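- /* vshlq_n_* and vshl_n_*: shift each element left by the immediate __p1.
-  * Unlike the vshl/vshlq forms above, which take a per-lane (possibly
-  * negative) shift count in a second vector, these take a constant shift and
-  * are written as statement-expression macros so __p1 stays a constant
-  * expression for the builtin.  The trailing integer literal in every
-  * __builtin_neon_* call is an internal element-type code, not a user
-  * argument.  As elsewhere in this header, the big-endian variants reverse
-  * the lanes before the builtin call and restore the order afterwards. */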
- #ifdef __LITTLE_ENDIAN__
- #define vshlq_n_u8(__p0, __p1) __extension__ ({ \
- uint8x16_t __s0 = __p0; \
- uint8x16_t __ret; \
- __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 48); \
- __ret; \
- })
- #else
- #define vshlq_n_u8(__p0, __p1) __extension__ ({ \
- uint8x16_t __s0 = __p0; \
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x16_t __ret; \
- __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 48); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshlq_n_u32(__p0, __p1) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __ret; \
- __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 50); \
- __ret; \
- })
- #else
- #define vshlq_n_u32(__p0, __p1) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint32x4_t __ret; \
- __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 50); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshlq_n_u64(__p0, __p1) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint64x2_t __ret; \
- __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 51); \
- __ret; \
- })
- #else
- #define vshlq_n_u64(__p0, __p1) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint64x2_t __ret; \
- __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 51); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshlq_n_u16(__p0, __p1) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __ret; \
- __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 49); \
- __ret; \
- })
- #else
- #define vshlq_n_u16(__p0, __p1) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __ret; \
- __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 49); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshlq_n_s8(__p0, __p1) __extension__ ({ \
- int8x16_t __s0 = __p0; \
- int8x16_t __ret; \
- __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 32); \
- __ret; \
- })
- #else
- #define vshlq_n_s8(__p0, __p1) __extension__ ({ \
- int8x16_t __s0 = __p0; \
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x16_t __ret; \
- __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 32); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshlq_n_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __ret; \
- __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 34); \
- __ret; \
- })
- #else
- #define vshlq_n_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 34); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshlq_n_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int64x2_t __ret; \
- __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 35); \
- __ret; \
- })
- #else
- #define vshlq_n_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int64x2_t __ret; \
- __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 35); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshlq_n_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __ret; \
- __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 33); \
- __ret; \
- })
- #else
- #define vshlq_n_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __ret; \
- __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 33); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshl_n_u8(__p0, __p1) __extension__ ({ \
- uint8x8_t __s0 = __p0; \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 16); \
- __ret; \
- })
- #else
- #define vshl_n_u8(__p0, __p1) __extension__ ({ \
- uint8x8_t __s0 = __p0; \
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 16); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshl_n_u32(__p0, __p1) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 18); \
- __ret; \
- })
- #else
- #define vshl_n_u32(__p0, __p1) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 18); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshl_n_u64(__p0, __p1) __extension__ ({ \
- uint64x1_t __s0 = __p0; \
- uint64x1_t __ret; \
- __ret = (uint64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 19); \
- __ret; \
- })
- #else
- #define vshl_n_u64(__p0, __p1) __extension__ ({ \
- uint64x1_t __s0 = __p0; \
- uint64x1_t __ret; \
- __ret = (uint64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 19); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshl_n_u16(__p0, __p1) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 17); \
- __ret; \
- })
- #else
- #define vshl_n_u16(__p0, __p1) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 17); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshl_n_s8(__p0, __p1) __extension__ ({ \
- int8x8_t __s0 = __p0; \
- int8x8_t __ret; \
- __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 0); \
- __ret; \
- })
- #else
- #define vshl_n_s8(__p0, __p1) __extension__ ({ \
- int8x8_t __s0 = __p0; \
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x8_t __ret; \
- __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 0); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshl_n_s32(__p0, __p1) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 2); \
- __ret; \
- })
- #else
- #define vshl_n_s32(__p0, __p1) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 2); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshl_n_s64(__p0, __p1) __extension__ ({ \
- int64x1_t __s0 = __p0; \
- int64x1_t __ret; \
- __ret = (int64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 3); \
- __ret; \
- })
- #else
- #define vshl_n_s64(__p0, __p1) __extension__ ({ \
- int64x1_t __s0 = __p0; \
- int64x1_t __ret; \
- __ret = (int64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 3); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshl_n_s16(__p0, __p1) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 1); \
- __ret; \
- })
- #else
- #define vshl_n_s16(__p0, __p1) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 1); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
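- /* vshll_n_*: widening shift left.  Each element of the 64-bit source is
-  * zero-extended (unsigned types) or sign-extended (signed types) to twice
-  * its width and shifted left by the immediate __p1, e.g. uint8x8_t widens
-  * to uint16x8_t.  The __noswap_ forms below perform no lane reversal; they
-  * exist for use by other big-endian wrappers whose inputs are already in
-  * reversed order. */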
- #ifdef __LITTLE_ENDIAN__
- #define vshll_n_u8(__p0, __p1) __extension__ ({ \
- uint8x8_t __s0 = __p0; \
- uint16x8_t __ret; \
- __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \
- __ret; \
- })
- #else
- #define vshll_n_u8(__p0, __p1) __extension__ ({ \
- uint8x8_t __s0 = __p0; \
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __ret; \
- __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 49); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vshll_n_u8(__p0, __p1) __extension__ ({ \
- uint8x8_t __s0 = __p0; \
- uint16x8_t __ret; \
- __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshll_n_u32(__p0, __p1) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint64x2_t __ret; \
- __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \
- __ret; \
- })
- #else
- #define vshll_n_u32(__p0, __p1) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint64x2_t __ret; \
- __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 51); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #define __noswap_vshll_n_u32(__p0, __p1) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint64x2_t __ret; \
- __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshll_n_u16(__p0, __p1) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint32x4_t __ret; \
- __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \
- __ret; \
- })
- #else
- #define vshll_n_u16(__p0, __p1) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint32x4_t __ret; \
- __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 50); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vshll_n_u16(__p0, __p1) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint32x4_t __ret; \
- __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshll_n_s8(__p0, __p1) __extension__ ({ \
- int8x8_t __s0 = __p0; \
- int16x8_t __ret; \
- __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \
- __ret; \
- })
- #else
- #define vshll_n_s8(__p0, __p1) __extension__ ({ \
- int8x8_t __s0 = __p0; \
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __ret; \
- __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 33); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vshll_n_s8(__p0, __p1) __extension__ ({ \
- int8x8_t __s0 = __p0; \
- int16x8_t __ret; \
- __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshll_n_s32(__p0, __p1) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int64x2_t __ret; \
- __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \
- __ret; \
- })
- #else
- #define vshll_n_s32(__p0, __p1) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int64x2_t __ret; \
- __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 35); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #define __noswap_vshll_n_s32(__p0, __p1) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int64x2_t __ret; \
- __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshll_n_s16(__p0, __p1) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int32x4_t __ret; \
- __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \
- __ret; \
- })
- #else
- #define vshll_n_s16(__p0, __p1) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 34); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vshll_n_s16(__p0, __p1) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int32x4_t __ret; \
- __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \
- __ret; \
- })
- #endif
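- /* vshrq_n_* and vshr_n_*: shift each element right by the immediate __p1,
-  * arithmetically for signed element types and logically for unsigned ones.
-  * The element width is unchanged. */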
- #ifdef __LITTLE_ENDIAN__
- #define vshrq_n_u8(__p0, __p1) __extension__ ({ \
- uint8x16_t __s0 = __p0; \
- uint8x16_t __ret; \
- __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 48); \
- __ret; \
- })
- #else
- #define vshrq_n_u8(__p0, __p1) __extension__ ({ \
- uint8x16_t __s0 = __p0; \
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x16_t __ret; \
- __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 48); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshrq_n_u32(__p0, __p1) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __ret; \
- __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 50); \
- __ret; \
- })
- #else
- #define vshrq_n_u32(__p0, __p1) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint32x4_t __ret; \
- __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 50); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshrq_n_u64(__p0, __p1) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint64x2_t __ret; \
- __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 51); \
- __ret; \
- })
- #else
- #define vshrq_n_u64(__p0, __p1) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint64x2_t __ret; \
- __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 51); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshrq_n_u16(__p0, __p1) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __ret; \
- __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 49); \
- __ret; \
- })
- #else
- #define vshrq_n_u16(__p0, __p1) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __ret; \
- __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 49); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshrq_n_s8(__p0, __p1) __extension__ ({ \
- int8x16_t __s0 = __p0; \
- int8x16_t __ret; \
- __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 32); \
- __ret; \
- })
- #else
- #define vshrq_n_s8(__p0, __p1) __extension__ ({ \
- int8x16_t __s0 = __p0; \
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x16_t __ret; \
- __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 32); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshrq_n_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __ret; \
- __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 34); \
- __ret; \
- })
- #else
- #define vshrq_n_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 34); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshrq_n_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int64x2_t __ret; \
- __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 35); \
- __ret; \
- })
- #else
- #define vshrq_n_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int64x2_t __ret; \
- __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 35); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshrq_n_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __ret; \
- __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 33); \
- __ret; \
- })
- #else
- #define vshrq_n_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __ret; \
- __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 33); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshr_n_u8(__p0, __p1) __extension__ ({ \
- uint8x8_t __s0 = __p0; \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 16); \
- __ret; \
- })
- #else
- #define vshr_n_u8(__p0, __p1) __extension__ ({ \
- uint8x8_t __s0 = __p0; \
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 16); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshr_n_u32(__p0, __p1) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 18); \
- __ret; \
- })
- #else
- #define vshr_n_u32(__p0, __p1) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 18); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshr_n_u64(__p0, __p1) __extension__ ({ \
- uint64x1_t __s0 = __p0; \
- uint64x1_t __ret; \
- __ret = (uint64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 19); \
- __ret; \
- })
- #else
- #define vshr_n_u64(__p0, __p1) __extension__ ({ \
- uint64x1_t __s0 = __p0; \
- uint64x1_t __ret; \
- __ret = (uint64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 19); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshr_n_u16(__p0, __p1) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 17); \
- __ret; \
- })
- #else
- #define vshr_n_u16(__p0, __p1) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 17); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshr_n_s8(__p0, __p1) __extension__ ({ \
- int8x8_t __s0 = __p0; \
- int8x8_t __ret; \
- __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 0); \
- __ret; \
- })
- #else
- #define vshr_n_s8(__p0, __p1) __extension__ ({ \
- int8x8_t __s0 = __p0; \
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x8_t __ret; \
- __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 0); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshr_n_s32(__p0, __p1) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 2); \
- __ret; \
- })
- #else
- #define vshr_n_s32(__p0, __p1) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 2); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshr_n_s64(__p0, __p1) __extension__ ({ \
- int64x1_t __s0 = __p0; \
- int64x1_t __ret; \
- __ret = (int64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 3); \
- __ret; \
- })
- #else
- #define vshr_n_s64(__p0, __p1) __extension__ ({ \
- int64x1_t __s0 = __p0; \
- int64x1_t __ret; \
- __ret = (int64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 3); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshr_n_s16(__p0, __p1) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 1); \
- __ret; \
- })
- #else
- #define vshr_n_s16(__p0, __p1) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 1); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
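- /* vshrn_n_*: narrowing shift right.  Each element of the 128-bit source is
-  * shifted right by the immediate __p1 and truncated to half its width,
-  * e.g. uint32x4_t narrows to uint16x4_t. */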
- #ifdef __LITTLE_ENDIAN__
- #define vshrn_n_u32(__p0, __p1) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \
- __ret; \
- })
- #else
- #define vshrn_n_u32(__p0, __p1) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 17); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vshrn_n_u32(__p0, __p1) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshrn_n_u64(__p0, __p1) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \
- __ret; \
- })
- #else
- #define vshrn_n_u64(__p0, __p1) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 18); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #define __noswap_vshrn_n_u64(__p0, __p1) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshrn_n_u16(__p0, __p1) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \
- __ret; \
- })
- #else
- #define vshrn_n_u16(__p0, __p1) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 16); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vshrn_n_u16(__p0, __p1) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshrn_n_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \
- __ret; \
- })
- #else
- #define vshrn_n_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 1); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vshrn_n_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshrn_n_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \
- __ret; \
- })
- #else
- #define vshrn_n_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 2); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #define __noswap_vshrn_n_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshrn_n_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int8x8_t __ret; \
- __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \
- __ret; \
- })
- #else
- #define vshrn_n_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x8_t __ret; \
- __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 0); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vshrn_n_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int8x8_t __ret; \
- __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \
- __ret; \
- })
- #endif
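- /* vsli_n_* and vsliq_n_*: shift left and insert.  Each element of __p1 is
-  * shifted left by __p2 and written into the corresponding element of __p0,
-  * leaving the low __p2 bits of each destination element unchanged. */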
- #ifdef __LITTLE_ENDIAN__
- #define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x8_t __s0 = __p0; \
- poly8x8_t __s1 = __p1; \
- poly8x8_t __ret; \
- __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \
- __ret; \
- })
- #else
- #define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x8_t __s0 = __p0; \
- poly8x8_t __s1 = __p1; \
- poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly8x8_t __ret; \
- __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x4_t __s0 = __p0; \
- poly16x4_t __s1 = __p1; \
- poly16x4_t __ret; \
- __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \
- __ret; \
- })
- #else
- #define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x4_t __s0 = __p0; \
- poly16x4_t __s1 = __p1; \
- poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- poly16x4_t __ret; \
- __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x16_t __s0 = __p0; \
- poly8x16_t __s1 = __p1; \
- poly8x16_t __ret; \
- __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \
- __ret; \
- })
- #else
- #define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x16_t __s0 = __p0; \
- poly8x16_t __s1 = __p1; \
- poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly8x16_t __ret; \
- __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x8_t __s0 = __p0; \
- poly16x8_t __s1 = __p1; \
- poly16x8_t __ret; \
- __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \
- __ret; \
- })
- #else
- #define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x8_t __s0 = __p0; \
- poly16x8_t __s1 = __p1; \
- poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly16x8_t __ret; \
- __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x16_t __s0 = __p0; \
- uint8x16_t __s1 = __p1; \
- uint8x16_t __ret; \
- __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
- __ret; \
- })
- #else
- #define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x16_t __s0 = __p0; \
- uint8x16_t __s1 = __p1; \
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x16_t __ret; \
- __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __s1 = __p1; \
- uint32x4_t __ret; \
- __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
- __ret; \
- })
- #else
- #define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __s1 = __p1; \
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- uint32x4_t __ret; \
- __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint64x2_t __s1 = __p1; \
- uint64x2_t __ret; \
- __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
- __ret; \
- })
- #else
- #define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint64x2_t __s1 = __p1; \
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- uint64x2_t __ret; \
- __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __s1 = __p1; \
- uint16x8_t __ret; \
- __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
- __ret; \
- })
- #else
- #define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __s1 = __p1; \
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __ret; \
- __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x16_t __s0 = __p0; \
- int8x16_t __s1 = __p1; \
- int8x16_t __ret; \
- __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
- __ret; \
- })
- #else
- #define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x16_t __s0 = __p0; \
- int8x16_t __s1 = __p1; \
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x16_t __ret; \
- __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x4_t __ret; \
- __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
- __ret; \
- })
- #else
- #define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int64x2_t __s1 = __p1; \
- int64x2_t __ret; \
- __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
- __ret; \
- })
- #else
- #define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int64x2_t __s1 = __p1; \
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int64x2_t __ret; \
- __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x8_t __ret; \
- __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
- __ret; \
- })
- #else
- #define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __ret; \
- __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x8_t __s0 = __p0; \
- uint8x8_t __s1 = __p1; \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
- __ret; \
- })
- #else
- #define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x8_t __s0 = __p0; \
- uint8x8_t __s1 = __p1; \
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x2_t __s1 = __p1; \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
- __ret; \
- })
- #else
- #define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x2_t __s1 = __p1; \
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsli_n_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x1_t __s0 = __p0; \
- uint64x1_t __s1 = __p1; \
- uint64x1_t __ret; \
- __ret = (uint64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
- __ret; \
- })
- #else
- #define vsli_n_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x1_t __s0 = __p0; \
- uint64x1_t __s1 = __p1; \
- uint64x1_t __ret; \
- __ret = (uint64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x4_t __s1 = __p1; \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
- __ret; \
- })
- #else
- #define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x4_t __s1 = __p1; \
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x8_t __s0 = __p0; \
- int8x8_t __s1 = __p1; \
- int8x8_t __ret; \
- __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
- __ret; \
- })
- #else
- #define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x8_t __s0 = __p0; \
- int8x8_t __s1 = __p1; \
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x8_t __ret; \
- __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
- __ret; \
- })
- #else
- #define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsli_n_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x1_t __s0 = __p0; \
- int64x1_t __s1 = __p1; \
- int64x1_t __ret; \
- __ret = (int64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
- __ret; \
- })
- #else
- #define vsli_n_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x1_t __s0 = __p0; \
- int64x1_t __s1 = __p1; \
- int64x1_t __ret; \
- __ret = (int64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
- __ret; \
- })
- #else
- #define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
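- /* vsraq_n_* and vsra_n_*: shift right and accumulate.  Each element of __p1
-  * is shifted right by __p2 (arithmetic for signed, logical for unsigned
-  * types) and added to the corresponding element of __p0. */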
- #ifdef __LITTLE_ENDIAN__
- #define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x16_t __s0 = __p0; \
- uint8x16_t __s1 = __p1; \
- uint8x16_t __ret; \
- __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
- __ret; \
- })
- #else
- #define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x16_t __s0 = __p0; \
- uint8x16_t __s1 = __p1; \
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x16_t __ret; \
- __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __s1 = __p1; \
- uint32x4_t __ret; \
- __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
- __ret; \
- })
- #else
- #define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __s1 = __p1; \
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- uint32x4_t __ret; \
- __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint64x2_t __s1 = __p1; \
- uint64x2_t __ret; \
- __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
- __ret; \
- })
- #else
- #define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint64x2_t __s1 = __p1; \
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- uint64x2_t __ret; \
- __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __s1 = __p1; \
- uint16x8_t __ret; \
- __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
- __ret; \
- })
- #else
- #define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __s1 = __p1; \
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __ret; \
- __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x16_t __s0 = __p0; \
- int8x16_t __s1 = __p1; \
- int8x16_t __ret; \
- __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
- __ret; \
- })
- #else
- #define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x16_t __s0 = __p0; \
- int8x16_t __s1 = __p1; \
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x16_t __ret; \
- __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x4_t __ret; \
- __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
- __ret; \
- })
- #else
- #define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int64x2_t __s1 = __p1; \
- int64x2_t __ret; \
- __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
- __ret; \
- })
- #else
- #define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int64x2_t __s1 = __p1; \
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int64x2_t __ret; \
- __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x8_t __ret; \
- __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
- __ret; \
- })
- #else
- #define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __ret; \
- __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x8_t __s0 = __p0; \
- uint8x8_t __s1 = __p1; \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
- __ret; \
- })
- #else
- #define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x8_t __s0 = __p0; \
- uint8x8_t __s1 = __p1; \
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x2_t __s1 = __p1; \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
- __ret; \
- })
- #else
- #define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x2_t __s1 = __p1; \
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x1_t __s0 = __p0; \
- uint64x1_t __s1 = __p1; \
- uint64x1_t __ret; \
- __ret = (uint64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
- __ret; \
- })
- #else
- #define vsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x1_t __s0 = __p0; \
- uint64x1_t __s1 = __p1; \
- uint64x1_t __ret; \
- __ret = (uint64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x4_t __s1 = __p1; \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
- __ret; \
- })
- #else
- #define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x4_t __s1 = __p1; \
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x8_t __s0 = __p0; \
- int8x8_t __s1 = __p1; \
- int8x8_t __ret; \
- __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
- __ret; \
- })
- #else
- #define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x8_t __s0 = __p0; \
- int8x8_t __s1 = __p1; \
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x8_t __ret; \
- __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
- __ret; \
- })
- #else
- #define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x1_t __s0 = __p0; \
- int64x1_t __s1 = __p1; \
- int64x1_t __ret; \
- __ret = (int64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
- __ret; \
- })
- #else
- #define vsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x1_t __s0 = __p0; \
- int64x1_t __s1 = __p1; \
- int64x1_t __ret; \
- __ret = (int64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
- __ret; \
- })
- #else
- #define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
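The vsra_n_* definitions above wrap the "shift right and accumulate" operation: each lane of the second operand is shifted right by the immediate and added to the corresponding lane of the first, with the big-endian variants reversing lane order around the builtin call. A minimal usage sketch follows (the helper name is illustrative, not part of the header; it assumes <arm_neon.h> on a NEON-capable target):

#include <arm_neon.h>

/* acc[i] += in[i] >> 2 for each of the four 16-bit lanes. */
static inline uint16x4_t accumulate_quarter(uint16x4_t acc, uint16x4_t in)
{
    return vsra_n_u16(acc, in, 2);  /* shift immediate must be 1..16 for 16-bit lanes */
}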
- #ifdef __LITTLE_ENDIAN__
- #define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x8_t __s0 = __p0; \
- poly8x8_t __s1 = __p1; \
- poly8x8_t __ret; \
- __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \
- __ret; \
- })
- #else
- #define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x8_t __s0 = __p0; \
- poly8x8_t __s1 = __p1; \
- poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly8x8_t __ret; \
- __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x4_t __s0 = __p0; \
- poly16x4_t __s1 = __p1; \
- poly16x4_t __ret; \
- __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \
- __ret; \
- })
- #else
- #define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x4_t __s0 = __p0; \
- poly16x4_t __s1 = __p1; \
- poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- poly16x4_t __ret; \
- __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x16_t __s0 = __p0; \
- poly8x16_t __s1 = __p1; \
- poly8x16_t __ret; \
- __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \
- __ret; \
- })
- #else
- #define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x16_t __s0 = __p0; \
- poly8x16_t __s1 = __p1; \
- poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly8x16_t __ret; \
- __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x8_t __s0 = __p0; \
- poly16x8_t __s1 = __p1; \
- poly16x8_t __ret; \
- __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \
- __ret; \
- })
- #else
- #define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x8_t __s0 = __p0; \
- poly16x8_t __s1 = __p1; \
- poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly16x8_t __ret; \
- __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x16_t __s0 = __p0; \
- uint8x16_t __s1 = __p1; \
- uint8x16_t __ret; \
- __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
- __ret; \
- })
- #else
- #define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x16_t __s0 = __p0; \
- uint8x16_t __s1 = __p1; \
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x16_t __ret; \
- __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __s1 = __p1; \
- uint32x4_t __ret; \
- __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
- __ret; \
- })
- #else
- #define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __s1 = __p1; \
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- uint32x4_t __ret; \
- __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint64x2_t __s1 = __p1; \
- uint64x2_t __ret; \
- __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
- __ret; \
- })
- #else
- #define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint64x2_t __s1 = __p1; \
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- uint64x2_t __ret; \
- __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __s1 = __p1; \
- uint16x8_t __ret; \
- __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
- __ret; \
- })
- #else
- #define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __s1 = __p1; \
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __ret; \
- __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x16_t __s0 = __p0; \
- int8x16_t __s1 = __p1; \
- int8x16_t __ret; \
- __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
- __ret; \
- })
- #else
- #define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x16_t __s0 = __p0; \
- int8x16_t __s1 = __p1; \
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x16_t __ret; \
- __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x4_t __ret; \
- __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
- __ret; \
- })
- #else
- #define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int64x2_t __s1 = __p1; \
- int64x2_t __ret; \
- __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
- __ret; \
- })
- #else
- #define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int64x2_t __s1 = __p1; \
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int64x2_t __ret; \
- __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x8_t __ret; \
- __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
- __ret; \
- })
- #else
- #define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __ret; \
- __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x8_t __s0 = __p0; \
- uint8x8_t __s1 = __p1; \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
- __ret; \
- })
- #else
- #define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x8_t __s0 = __p0; \
- uint8x8_t __s1 = __p1; \
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x8_t __ret; \
- __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x2_t __s1 = __p1; \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
- __ret; \
- })
- #else
- #define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x2_t __s1 = __p1; \
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- uint32x2_t __ret; \
- __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsri_n_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x1_t __s0 = __p0; \
- uint64x1_t __s1 = __p1; \
- uint64x1_t __ret; \
- __ret = (uint64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
- __ret; \
- })
- #else
- #define vsri_n_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x1_t __s0 = __p0; \
- uint64x1_t __s1 = __p1; \
- uint64x1_t __ret; \
- __ret = (uint64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x4_t __s1 = __p1; \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
- __ret; \
- })
- #else
- #define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x4_t __s1 = __p1; \
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x8_t __s0 = __p0; \
- int8x8_t __s1 = __p1; \
- int8x8_t __ret; \
- __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
- __ret; \
- })
- #else
- #define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x8_t __s0 = __p0; \
- int8x8_t __s1 = __p1; \
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x8_t __ret; \
- __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
- __ret; \
- })
- #else
- #define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int32x2_t __ret; \
- __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsri_n_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x1_t __s0 = __p0; \
- int64x1_t __s1 = __p1; \
- int64x1_t __ret; \
- __ret = (int64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
- __ret; \
- })
- #else
- #define vsri_n_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x1_t __s0 = __p0; \
- int64x1_t __s1 = __p1; \
- int64x1_t __ret; \
- __ret = (int64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
- __ret; \
- })
- #else
- #define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
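The vsri_n_*/vsriq_n_* macros above implement "shift right and insert": the second operand is shifted right by the immediate and written into the destination lanes, while the top immediate-many bits of each destination lane are left untouched. A small sketch of how this can merge two bit-fields per lane (helper name illustrative, assumes <arm_neon.h>):

#include <arm_neon.h>

/* Keep the top 3 bits of each dst lane and fill the low 5 bits with src >> 3. */
static inline uint8x8_t merge_high3_low5(uint8x8_t dst, uint8x8_t src)
{
    return vsri_n_u8(dst, src, 3);  /* shift immediate must be 1..8 for 8-bit lanes */
}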
- #ifdef __LITTLE_ENDIAN__
- #define vst1_p8(__p0, __p1) __extension__ ({ \
- poly8x8_t __s1 = __p1; \
- __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 4); \
- })
- #else
- #define vst1_p8(__p0, __p1) __extension__ ({ \
- poly8x8_t __s1 = __p1; \
- poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 4); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_p16(__p0, __p1) __extension__ ({ \
- poly16x4_t __s1 = __p1; \
- __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 5); \
- })
- #else
- #define vst1_p16(__p0, __p1) __extension__ ({ \
- poly16x4_t __s1 = __p1; \
- poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 5); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_p8(__p0, __p1) __extension__ ({ \
- poly8x16_t __s1 = __p1; \
- __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 36); \
- })
- #else
- #define vst1q_p8(__p0, __p1) __extension__ ({ \
- poly8x16_t __s1 = __p1; \
- poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 36); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_p16(__p0, __p1) __extension__ ({ \
- poly16x8_t __s1 = __p1; \
- __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 37); \
- })
- #else
- #define vst1q_p16(__p0, __p1) __extension__ ({ \
- poly16x8_t __s1 = __p1; \
- poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 37); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_u8(__p0, __p1) __extension__ ({ \
- uint8x16_t __s1 = __p1; \
- __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 48); \
- })
- #else
- #define vst1q_u8(__p0, __p1) __extension__ ({ \
- uint8x16_t __s1 = __p1; \
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 48); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_u32(__p0, __p1) __extension__ ({ \
- uint32x4_t __s1 = __p1; \
- __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 50); \
- })
- #else
- #define vst1q_u32(__p0, __p1) __extension__ ({ \
- uint32x4_t __s1 = __p1; \
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 50); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_u64(__p0, __p1) __extension__ ({ \
- uint64x2_t __s1 = __p1; \
- __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 51); \
- })
- #else
- #define vst1q_u64(__p0, __p1) __extension__ ({ \
- uint64x2_t __s1 = __p1; \
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 51); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_u16(__p0, __p1) __extension__ ({ \
- uint16x8_t __s1 = __p1; \
- __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 49); \
- })
- #else
- #define vst1q_u16(__p0, __p1) __extension__ ({ \
- uint16x8_t __s1 = __p1; \
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 49); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_s8(__p0, __p1) __extension__ ({ \
- int8x16_t __s1 = __p1; \
- __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 32); \
- })
- #else
- #define vst1q_s8(__p0, __p1) __extension__ ({ \
- int8x16_t __s1 = __p1; \
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 32); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_f32(__p0, __p1) __extension__ ({ \
- float32x4_t __s1 = __p1; \
- __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 41); \
- })
- #else
- #define vst1q_f32(__p0, __p1) __extension__ ({ \
- float32x4_t __s1 = __p1; \
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 41); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_f16(__p0, __p1) __extension__ ({ \
- float16x8_t __s1 = __p1; \
- __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 40); \
- })
- #else
- #define vst1q_f16(__p0, __p1) __extension__ ({ \
- float16x8_t __s1 = __p1; \
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 40); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s1 = __p1; \
- __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 34); \
- })
- #else
- #define vst1q_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s1 = __p1; \
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 34); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s1 = __p1; \
- __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 35); \
- })
- #else
- #define vst1q_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s1 = __p1; \
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 35); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s1 = __p1; \
- __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 33); \
- })
- #else
- #define vst1q_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s1 = __p1; \
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 33); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_u8(__p0, __p1) __extension__ ({ \
- uint8x8_t __s1 = __p1; \
- __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 16); \
- })
- #else
- #define vst1_u8(__p0, __p1) __extension__ ({ \
- uint8x8_t __s1 = __p1; \
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 16); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_u32(__p0, __p1) __extension__ ({ \
- uint32x2_t __s1 = __p1; \
- __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 18); \
- })
- #else
- #define vst1_u32(__p0, __p1) __extension__ ({ \
- uint32x2_t __s1 = __p1; \
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 18); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_u64(__p0, __p1) __extension__ ({ \
- uint64x1_t __s1 = __p1; \
- __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 19); \
- })
- #else
- #define vst1_u64(__p0, __p1) __extension__ ({ \
- uint64x1_t __s1 = __p1; \
- __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 19); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_u16(__p0, __p1) __extension__ ({ \
- uint16x4_t __s1 = __p1; \
- __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 17); \
- })
- #else
- #define vst1_u16(__p0, __p1) __extension__ ({ \
- uint16x4_t __s1 = __p1; \
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 17); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_s8(__p0, __p1) __extension__ ({ \
- int8x8_t __s1 = __p1; \
- __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 0); \
- })
- #else
- #define vst1_s8(__p0, __p1) __extension__ ({ \
- int8x8_t __s1 = __p1; \
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 0); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_f32(__p0, __p1) __extension__ ({ \
- float32x2_t __s1 = __p1; \
- __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 9); \
- })
- #else
- #define vst1_f32(__p0, __p1) __extension__ ({ \
- float32x2_t __s1 = __p1; \
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 9); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_f16(__p0, __p1) __extension__ ({ \
- float16x4_t __s1 = __p1; \
- __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 8); \
- })
- #else
- #define vst1_f16(__p0, __p1) __extension__ ({ \
- float16x4_t __s1 = __p1; \
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 8); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_s32(__p0, __p1) __extension__ ({ \
- int32x2_t __s1 = __p1; \
- __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 2); \
- })
- #else
- #define vst1_s32(__p0, __p1) __extension__ ({ \
- int32x2_t __s1 = __p1; \
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 2); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_s64(__p0, __p1) __extension__ ({ \
- int64x1_t __s1 = __p1; \
- __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 3); \
- })
- #else
- #define vst1_s64(__p0, __p1) __extension__ ({ \
- int64x1_t __s1 = __p1; \
- __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 3); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_s16(__p0, __p1) __extension__ ({ \
- int16x4_t __s1 = __p1; \
- __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 1); \
- })
- #else
- #define vst1_s16(__p0, __p1) __extension__ ({ \
- int16x4_t __s1 = __p1; \
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 1); \
- })
- #endif
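The vst1_*/vst1q_* macros store a complete vector to memory, with the big-endian variants restoring memory lane order via a shuffle before the builtin store. A minimal sketch (helper name illustrative, assumes <arm_neon.h>):

#include <arm_neon.h>

/* Write 16 copies of `value` to p in a single full-register store. */
static inline void fill16(uint8_t *p, uint8_t value)
{
    vst1q_u8(p, vdupq_n_u8(value));
}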
- #ifdef __LITTLE_ENDIAN__
- #define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x8_t __s1 = __p1; \
- __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \
- })
- #else
- #define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x8_t __s1 = __p1; \
- poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x4_t __s1 = __p1; \
- __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \
- })
- #else
- #define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x4_t __s1 = __p1; \
- poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x16_t __s1 = __p1; \
- __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \
- })
- #else
- #define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x16_t __s1 = __p1; \
- poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x8_t __s1 = __p1; \
- __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \
- })
- #else
- #define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x8_t __s1 = __p1; \
- poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x16_t __s1 = __p1; \
- __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 48); \
- })
- #else
- #define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x16_t __s1 = __p1; \
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x4_t __s1 = __p1; \
- __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \
- })
- #else
- #define vst1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x4_t __s1 = __p1; \
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x2_t __s1 = __p1; \
- __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \
- })
- #else
- #define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x2_t __s1 = __p1; \
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x8_t __s1 = __p1; \
- __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \
- })
- #else
- #define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x8_t __s1 = __p1; \
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x16_t __s1 = __p1; \
- __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \
- })
- #else
- #define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x16_t __s1 = __p1; \
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x4_t __s1 = __p1; \
- __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \
- })
- #else
- #define vst1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x4_t __s1 = __p1; \
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x8_t __s1 = __p1; \
- __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 40); \
- })
- #else
- #define vst1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x8_t __s1 = __p1; \
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 40); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4_t __s1 = __p1; \
- __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \
- })
- #else
- #define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4_t __s1 = __p1; \
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x2_t __s1 = __p1; \
- __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \
- })
- #else
- #define vst1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x2_t __s1 = __p1; \
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8_t __s1 = __p1; \
- __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \
- })
- #else
- #define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8_t __s1 = __p1; \
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x8_t __s1 = __p1; \
- __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \
- })
- #else
- #define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x8_t __s1 = __p1; \
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x2_t __s1 = __p1; \
- __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \
- })
- #else
- #define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x2_t __s1 = __p1; \
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x1_t __s1 = __p1; \
- __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \
- })
- #else
- #define vst1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x1_t __s1 = __p1; \
- __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x4_t __s1 = __p1; \
- __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 17); \
- })
- #else
- #define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x4_t __s1 = __p1; \
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x8_t __s1 = __p1; \
- __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \
- })
- #else
- #define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x8_t __s1 = __p1; \
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x2_t __s1 = __p1; \
- __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \
- })
- #else
- #define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x2_t __s1 = __p1; \
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x4_t __s1 = __p1; \
- __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 8); \
- })
- #else
- #define vst1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x4_t __s1 = __p1; \
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 8); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2_t __s1 = __p1; \
- __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \
- })
- #else
- #define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2_t __s1 = __p1; \
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x1_t __s1 = __p1; \
- __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \
- })
- #else
- #define vst1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x1_t __s1 = __p1; \
- __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4_t __s1 = __p1; \
- __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \
- })
- #else
- #define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4_t __s1 = __p1; \
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \
- })
- #endif
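The vst1_lane_*/vst1q_lane_* macros store a single lane of a vector; the remaining lanes are not written. Sketch (helper name illustrative, assumes <arm_neon.h>):

#include <arm_neon.h>

/* Store only lane 2 of a four-lane 32-bit vector; the lane index must be a
   compile-time constant in the range 0..3. */
static inline void store_lane2(int32_t *p, int32x4_t v)
{
    vst1q_lane_s32(p, v, 2);
}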
- #ifdef __LITTLE_ENDIAN__
- #define vst2_p8(__p0, __p1) __extension__ ({ \
- poly8x8x2_t __s1 = __p1; \
- __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 4); \
- })
- #else
- #define vst2_p8(__p0, __p1) __extension__ ({ \
- poly8x8x2_t __s1 = __p1; \
- poly8x8x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 4); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2_p16(__p0, __p1) __extension__ ({ \
- poly16x4x2_t __s1 = __p1; \
- __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 5); \
- })
- #else
- #define vst2_p16(__p0, __p1) __extension__ ({ \
- poly16x4x2_t __s1 = __p1; \
- poly16x4x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 5); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2q_p8(__p0, __p1) __extension__ ({ \
- poly8x16x2_t __s1 = __p1; \
- __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 36); \
- })
- #else
- #define vst2q_p8(__p0, __p1) __extension__ ({ \
- poly8x16x2_t __s1 = __p1; \
- poly8x16x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 36); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2q_p16(__p0, __p1) __extension__ ({ \
- poly16x8x2_t __s1 = __p1; \
- __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 37); \
- })
- #else
- #define vst2q_p16(__p0, __p1) __extension__ ({ \
- poly16x8x2_t __s1 = __p1; \
- poly16x8x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 37); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2q_u8(__p0, __p1) __extension__ ({ \
- uint8x16x2_t __s1 = __p1; \
- __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 48); \
- })
- #else
- #define vst2q_u8(__p0, __p1) __extension__ ({ \
- uint8x16x2_t __s1 = __p1; \
- uint8x16x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 48); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2q_u32(__p0, __p1) __extension__ ({ \
- uint32x4x2_t __s1 = __p1; \
- __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 50); \
- })
- #else
- #define vst2q_u32(__p0, __p1) __extension__ ({ \
- uint32x4x2_t __s1 = __p1; \
- uint32x4x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 50); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2q_u16(__p0, __p1) __extension__ ({ \
- uint16x8x2_t __s1 = __p1; \
- __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 49); \
- })
- #else
- #define vst2q_u16(__p0, __p1) __extension__ ({ \
- uint16x8x2_t __s1 = __p1; \
- uint16x8x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 49); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2q_s8(__p0, __p1) __extension__ ({ \
- int8x16x2_t __s1 = __p1; \
- __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 32); \
- })
- #else
- #define vst2q_s8(__p0, __p1) __extension__ ({ \
- int8x16x2_t __s1 = __p1; \
- int8x16x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 32); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2q_f32(__p0, __p1) __extension__ ({ \
- float32x4x2_t __s1 = __p1; \
- __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 41); \
- })
- #else
- #define vst2q_f32(__p0, __p1) __extension__ ({ \
- float32x4x2_t __s1 = __p1; \
- float32x4x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 41); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2q_f16(__p0, __p1) __extension__ ({ \
- float16x8x2_t __s1 = __p1; \
- __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 40); \
- })
- #else
- #define vst2q_f16(__p0, __p1) __extension__ ({ \
- float16x8x2_t __s1 = __p1; \
- float16x8x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 40); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2q_s32(__p0, __p1) __extension__ ({ \
- int32x4x2_t __s1 = __p1; \
- __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 34); \
- })
- #else
- #define vst2q_s32(__p0, __p1) __extension__ ({ \
- int32x4x2_t __s1 = __p1; \
- int32x4x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 34); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2q_s16(__p0, __p1) __extension__ ({ \
- int16x8x2_t __s1 = __p1; \
- __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 33); \
- })
- #else
- #define vst2q_s16(__p0, __p1) __extension__ ({ \
- int16x8x2_t __s1 = __p1; \
- int16x8x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 33); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2_u8(__p0, __p1) __extension__ ({ \
- uint8x8x2_t __s1 = __p1; \
- __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 16); \
- })
- #else
- #define vst2_u8(__p0, __p1) __extension__ ({ \
- uint8x8x2_t __s1 = __p1; \
- uint8x8x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 16); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2_u32(__p0, __p1) __extension__ ({ \
- uint32x2x2_t __s1 = __p1; \
- __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 18); \
- })
- #else
- #define vst2_u32(__p0, __p1) __extension__ ({ \
- uint32x2x2_t __s1 = __p1; \
- uint32x2x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 18); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2_u64(__p0, __p1) __extension__ ({ \
- uint64x1x2_t __s1 = __p1; \
- __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \
- })
- #else
- #define vst2_u64(__p0, __p1) __extension__ ({ \
- uint64x1x2_t __s1 = __p1; \
- __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2_u16(__p0, __p1) __extension__ ({ \
- uint16x4x2_t __s1 = __p1; \
- __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 17); \
- })
- #else
- #define vst2_u16(__p0, __p1) __extension__ ({ \
- uint16x4x2_t __s1 = __p1; \
- uint16x4x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 17); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2_s8(__p0, __p1) __extension__ ({ \
- int8x8x2_t __s1 = __p1; \
- __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 0); \
- })
- #else
- #define vst2_s8(__p0, __p1) __extension__ ({ \
- int8x8x2_t __s1 = __p1; \
- int8x8x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 0); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2_f32(__p0, __p1) __extension__ ({ \
- float32x2x2_t __s1 = __p1; \
- __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 9); \
- })
- #else
- #define vst2_f32(__p0, __p1) __extension__ ({ \
- float32x2x2_t __s1 = __p1; \
- float32x2x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 9); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2_f16(__p0, __p1) __extension__ ({ \
- float16x4x2_t __s1 = __p1; \
- __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 8); \
- })
- #else
- #define vst2_f16(__p0, __p1) __extension__ ({ \
- float16x4x2_t __s1 = __p1; \
- float16x4x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 8); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2_s32(__p0, __p1) __extension__ ({ \
- int32x2x2_t __s1 = __p1; \
- __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 2); \
- })
- #else
- #define vst2_s32(__p0, __p1) __extension__ ({ \
- int32x2x2_t __s1 = __p1; \
- int32x2x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 2); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2_s64(__p0, __p1) __extension__ ({ \
- int64x1x2_t __s1 = __p1; \
- __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 3); \
- })
- #else
- #define vst2_s64(__p0, __p1) __extension__ ({ \
- int64x1x2_t __s1 = __p1; \
- __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 3); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2_s16(__p0, __p1) __extension__ ({ \
- int16x4x2_t __s1 = __p1; \
- __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 1); \
- })
- #else
- #define vst2_s16(__p0, __p1) __extension__ ({ \
- int16x4x2_t __s1 = __p1; \
- int16x4x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __builtin_neon_vst2_v(__p0, __rev1.val[0], __rev1.val[1], 1); \
- })
- #endif
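The vst2_*/vst2q_* macros store a pair of vectors with their lanes interleaved in memory, i.e. the store-side counterpart of vld2. Sketch (helper name illustrative, assumes <arm_neon.h>):

#include <arm_neon.h>

/* Memory receives a[0], b[0], a[1], b[1], ..., a[7], b[7] (16 bytes). */
static inline void store_interleaved(uint8_t *p, uint8x8_t a, uint8x8_t b)
{
    uint8x8x2_t pair = { { a, b } };
    vst2_u8(p, pair);
}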
- #ifdef __LITTLE_ENDIAN__
- #define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x8x2_t __s1 = __p1; \
- __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \
- })
- #else
- #define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x8x2_t __s1 = __p1; \
- poly8x8x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x4x2_t __s1 = __p1; \
- __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 5); \
- })
- #else
- #define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x4x2_t __s1 = __p1; \
- poly16x4x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x8x2_t __s1 = __p1; \
- __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \
- })
- #else
- #define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x8x2_t __s1 = __p1; \
- poly16x8x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x4x2_t __s1 = __p1; \
- __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \
- })
- #else
- #define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x4x2_t __s1 = __p1; \
- uint32x4x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x8x2_t __s1 = __p1; \
- __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \
- })
- #else
- #define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x8x2_t __s1 = __p1; \
- uint16x8x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x4x2_t __s1 = __p1; \
- __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 41); \
- })
- #else
- #define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x4x2_t __s1 = __p1; \
- float32x4x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 41); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x8x2_t __s1 = __p1; \
- __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 40); \
- })
- #else
- #define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x8x2_t __s1 = __p1; \
- float16x8x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 40); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4x2_t __s1 = __p1; \
- __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 34); \
- })
- #else
- #define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4x2_t __s1 = __p1; \
- int32x4x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 34); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8x2_t __s1 = __p1; \
- __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 33); \
- })
- #else
- #define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8x2_t __s1 = __p1; \
- int16x8x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 33); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x8x2_t __s1 = __p1; \
- __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \
- })
- #else
- #define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x8x2_t __s1 = __p1; \
- uint8x8x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x2x2_t __s1 = __p1; \
- __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 18); \
- })
- #else
- #define vst2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x2x2_t __s1 = __p1; \
- uint32x2x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x4x2_t __s1 = __p1; \
- __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \
- })
- #else
- #define vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x4x2_t __s1 = __p1; \
- uint16x4x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x8x2_t __s1 = __p1; \
- __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \
- })
- #else
- #define vst2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x8x2_t __s1 = __p1; \
- int8x8x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x2x2_t __s1 = __p1; \
- __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 9); \
- })
- #else
- #define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x2x2_t __s1 = __p1; \
- float32x2x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 9); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x4x2_t __s1 = __p1; \
- __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 8); \
- })
- #else
- #define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x4x2_t __s1 = __p1; \
- float16x4x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 8); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2x2_t __s1 = __p1; \
- __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 2); \
- })
- #else
- #define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2x2_t __s1 = __p1; \
- int32x2x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 2); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4x2_t __s1 = __p1; \
- __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 1); \
- })
- #else
- #define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4x2_t __s1 = __p1; \
- int16x4x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __builtin_neon_vst2_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 1); \
- })
- #endif
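The vst2_lane_* wrappers above all follow one pattern: on little-endian targets the register pair is passed straight to __builtin_neon_vst2_lane_v, while on big-endian targets each register's lanes are first reversed with __builtin_shufflevector so the builtin receives them in architectural lane order. A minimal usage sketch of one macro defined above (vst2_lane_u16); the helper name and arguments are hypothetical and assume <arm_neon.h> on an ARM target:

#include <arm_neon.h>

/* Writes s.val[0][3] followed by s.val[1][3], i.e. two adjacent uint16_t
   values, to dst: a single-lane interleaved store of a register pair. */
static void store_pair_lane3(uint16_t *dst, uint16x4x2_t s) {
  vst2_lane_u16(dst, s, 3);  /* lane index must be a compile-time constant */
}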
- #ifdef __LITTLE_ENDIAN__
- #define vst3_p8(__p0, __p1) __extension__ ({ \
- poly8x8x3_t __s1 = __p1; \
- __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 4); \
- })
- #else
- #define vst3_p8(__p0, __p1) __extension__ ({ \
- poly8x8x3_t __s1 = __p1; \
- poly8x8x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 4); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3_p16(__p0, __p1) __extension__ ({ \
- poly16x4x3_t __s1 = __p1; \
- __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 5); \
- })
- #else
- #define vst3_p16(__p0, __p1) __extension__ ({ \
- poly16x4x3_t __s1 = __p1; \
- poly16x4x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 5); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3q_p8(__p0, __p1) __extension__ ({ \
- poly8x16x3_t __s1 = __p1; \
- __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 36); \
- })
- #else
- #define vst3q_p8(__p0, __p1) __extension__ ({ \
- poly8x16x3_t __s1 = __p1; \
- poly8x16x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 36); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3q_p16(__p0, __p1) __extension__ ({ \
- poly16x8x3_t __s1 = __p1; \
- __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 37); \
- })
- #else
- #define vst3q_p16(__p0, __p1) __extension__ ({ \
- poly16x8x3_t __s1 = __p1; \
- poly16x8x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 37); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3q_u8(__p0, __p1) __extension__ ({ \
- uint8x16x3_t __s1 = __p1; \
- __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 48); \
- })
- #else
- #define vst3q_u8(__p0, __p1) __extension__ ({ \
- uint8x16x3_t __s1 = __p1; \
- uint8x16x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 48); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3q_u32(__p0, __p1) __extension__ ({ \
- uint32x4x3_t __s1 = __p1; \
- __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 50); \
- })
- #else
- #define vst3q_u32(__p0, __p1) __extension__ ({ \
- uint32x4x3_t __s1 = __p1; \
- uint32x4x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 50); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3q_u16(__p0, __p1) __extension__ ({ \
- uint16x8x3_t __s1 = __p1; \
- __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 49); \
- })
- #else
- #define vst3q_u16(__p0, __p1) __extension__ ({ \
- uint16x8x3_t __s1 = __p1; \
- uint16x8x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 49); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3q_s8(__p0, __p1) __extension__ ({ \
- int8x16x3_t __s1 = __p1; \
- __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 32); \
- })
- #else
- #define vst3q_s8(__p0, __p1) __extension__ ({ \
- int8x16x3_t __s1 = __p1; \
- int8x16x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 32); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3q_f32(__p0, __p1) __extension__ ({ \
- float32x4x3_t __s1 = __p1; \
- __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 41); \
- })
- #else
- #define vst3q_f32(__p0, __p1) __extension__ ({ \
- float32x4x3_t __s1 = __p1; \
- float32x4x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 41); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3q_f16(__p0, __p1) __extension__ ({ \
- float16x8x3_t __s1 = __p1; \
- __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 40); \
- })
- #else
- #define vst3q_f16(__p0, __p1) __extension__ ({ \
- float16x8x3_t __s1 = __p1; \
- float16x8x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 40); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3q_s32(__p0, __p1) __extension__ ({ \
- int32x4x3_t __s1 = __p1; \
- __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 34); \
- })
- #else
- #define vst3q_s32(__p0, __p1) __extension__ ({ \
- int32x4x3_t __s1 = __p1; \
- int32x4x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 34); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3q_s16(__p0, __p1) __extension__ ({ \
- int16x8x3_t __s1 = __p1; \
- __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 33); \
- })
- #else
- #define vst3q_s16(__p0, __p1) __extension__ ({ \
- int16x8x3_t __s1 = __p1; \
- int16x8x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 33); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3_u8(__p0, __p1) __extension__ ({ \
- uint8x8x3_t __s1 = __p1; \
- __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 16); \
- })
- #else
- #define vst3_u8(__p0, __p1) __extension__ ({ \
- uint8x8x3_t __s1 = __p1; \
- uint8x8x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 16); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3_u32(__p0, __p1) __extension__ ({ \
- uint32x2x3_t __s1 = __p1; \
- __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 18); \
- })
- #else
- #define vst3_u32(__p0, __p1) __extension__ ({ \
- uint32x2x3_t __s1 = __p1; \
- uint32x2x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 18); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3_u64(__p0, __p1) __extension__ ({ \
- uint64x1x3_t __s1 = __p1; \
- __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \
- })
- #else
- #define vst3_u64(__p0, __p1) __extension__ ({ \
- uint64x1x3_t __s1 = __p1; \
- __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3_u16(__p0, __p1) __extension__ ({ \
- uint16x4x3_t __s1 = __p1; \
- __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 17); \
- })
- #else
- #define vst3_u16(__p0, __p1) __extension__ ({ \
- uint16x4x3_t __s1 = __p1; \
- uint16x4x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 17); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3_s8(__p0, __p1) __extension__ ({ \
- int8x8x3_t __s1 = __p1; \
- __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 0); \
- })
- #else
- #define vst3_s8(__p0, __p1) __extension__ ({ \
- int8x8x3_t __s1 = __p1; \
- int8x8x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 0); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3_f32(__p0, __p1) __extension__ ({ \
- float32x2x3_t __s1 = __p1; \
- __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 9); \
- })
- #else
- #define vst3_f32(__p0, __p1) __extension__ ({ \
- float32x2x3_t __s1 = __p1; \
- float32x2x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 9); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3_f16(__p0, __p1) __extension__ ({ \
- float16x4x3_t __s1 = __p1; \
- __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 8); \
- })
- #else
- #define vst3_f16(__p0, __p1) __extension__ ({ \
- float16x4x3_t __s1 = __p1; \
- float16x4x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 8); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3_s32(__p0, __p1) __extension__ ({ \
- int32x2x3_t __s1 = __p1; \
- __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 2); \
- })
- #else
- #define vst3_s32(__p0, __p1) __extension__ ({ \
- int32x2x3_t __s1 = __p1; \
- int32x2x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 2); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3_s64(__p0, __p1) __extension__ ({ \
- int64x1x3_t __s1 = __p1; \
- __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 3); \
- })
- #else
- #define vst3_s64(__p0, __p1) __extension__ ({ \
- int64x1x3_t __s1 = __p1; \
- __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 3); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3_s16(__p0, __p1) __extension__ ({ \
- int16x4x3_t __s1 = __p1; \
- __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 1); \
- })
- #else
- #define vst3_s16(__p0, __p1) __extension__ ({ \
- int16x4x3_t __s1 = __p1; \
- int16x4x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __builtin_neon_vst3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 1); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x8x3_t __s1 = __p1; \
- __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \
- })
- #else
- #define vst3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x8x3_t __s1 = __p1; \
- poly8x8x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x4x3_t __s1 = __p1; \
- __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \
- })
- #else
- #define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x4x3_t __s1 = __p1; \
- poly16x4x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x8x3_t __s1 = __p1; \
- __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \
- })
- #else
- #define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x8x3_t __s1 = __p1; \
- poly16x8x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x4x3_t __s1 = __p1; \
- __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 50); \
- })
- #else
- #define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x4x3_t __s1 = __p1; \
- uint32x4x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x8x3_t __s1 = __p1; \
- __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \
- })
- #else
- #define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x8x3_t __s1 = __p1; \
- uint16x8x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x4x3_t __s1 = __p1; \
- __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 41); \
- })
- #else
- #define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x4x3_t __s1 = __p1; \
- float32x4x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 41); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x8x3_t __s1 = __p1; \
- __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 40); \
- })
- #else
- #define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x8x3_t __s1 = __p1; \
- float16x8x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 40); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4x3_t __s1 = __p1; \
- __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 34); \
- })
- #else
- #define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4x3_t __s1 = __p1; \
- int32x4x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 34); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8x3_t __s1 = __p1; \
- __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 33); \
- })
- #else
- #define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8x3_t __s1 = __p1; \
- int16x8x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 33); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x8x3_t __s1 = __p1; \
- __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \
- })
- #else
- #define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x8x3_t __s1 = __p1; \
- uint8x8x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x2x3_t __s1 = __p1; \
- __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \
- })
- #else
- #define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x2x3_t __s1 = __p1; \
- uint32x2x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x4x3_t __s1 = __p1; \
- __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 17); \
- })
- #else
- #define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x4x3_t __s1 = __p1; \
- uint16x4x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x8x3_t __s1 = __p1; \
- __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \
- })
- #else
- #define vst3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x8x3_t __s1 = __p1; \
- int8x8x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x2x3_t __s1 = __p1; \
- __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 9); \
- })
- #else
- #define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x2x3_t __s1 = __p1; \
- float32x2x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 9); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x4x3_t __s1 = __p1; \
- __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 8); \
- })
- #else
- #define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x4x3_t __s1 = __p1; \
- float16x4x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 8); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2x3_t __s1 = __p1; \
- __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 2); \
- })
- #else
- #define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2x3_t __s1 = __p1; \
- int32x2x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 2); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4x3_t __s1 = __p1; \
- __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 1); \
- })
- #else
- #define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4x3_t __s1 = __p1; \
- int16x4x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __builtin_neon_vst3_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 1); \
- })
- #endif
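The vst3_* and vst3_lane_* macros above repeat the same little-endian/big-endian shape for three registers. A short sketch of a typical use of the vst3_u8 macro defined above, planar-to-interleaved RGB; the function and variable names are hypothetical:

#include <arm_neon.h>

/* Interleaves 8 pixels of planar R, G, B into 24 bytes at dst:
   r[0], g[0], b[0], r[1], g[1], b[1], ... as performed by VST3. */
static void store_rgb8(uint8_t *dst, uint8x8_t r, uint8x8_t g, uint8x8_t b) {
  uint8x8x3_t rgb = { { r, g, b } };
  vst3_u8(dst, rgb);
}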
- #ifdef __LITTLE_ENDIAN__
- #define vst4_p8(__p0, __p1) __extension__ ({ \
- poly8x8x4_t __s1 = __p1; \
- __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 4); \
- })
- #else
- #define vst4_p8(__p0, __p1) __extension__ ({ \
- poly8x8x4_t __s1 = __p1; \
- poly8x8x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 4); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4_p16(__p0, __p1) __extension__ ({ \
- poly16x4x4_t __s1 = __p1; \
- __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 5); \
- })
- #else
- #define vst4_p16(__p0, __p1) __extension__ ({ \
- poly16x4x4_t __s1 = __p1; \
- poly16x4x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
- __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 5); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4q_p8(__p0, __p1) __extension__ ({ \
- poly8x16x4_t __s1 = __p1; \
- __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 36); \
- })
- #else
- #define vst4q_p8(__p0, __p1) __extension__ ({ \
- poly8x16x4_t __s1 = __p1; \
- poly8x16x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 36); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4q_p16(__p0, __p1) __extension__ ({ \
- poly16x8x4_t __s1 = __p1; \
- __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 37); \
- })
- #else
- #define vst4q_p16(__p0, __p1) __extension__ ({ \
- poly16x8x4_t __s1 = __p1; \
- poly16x8x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 37); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4q_u8(__p0, __p1) __extension__ ({ \
- uint8x16x4_t __s1 = __p1; \
- __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 48); \
- })
- #else
- #define vst4q_u8(__p0, __p1) __extension__ ({ \
- uint8x16x4_t __s1 = __p1; \
- uint8x16x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 48); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4q_u32(__p0, __p1) __extension__ ({ \
- uint32x4x4_t __s1 = __p1; \
- __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 50); \
- })
- #else
- #define vst4q_u32(__p0, __p1) __extension__ ({ \
- uint32x4x4_t __s1 = __p1; \
- uint32x4x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
- __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 50); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4q_u16(__p0, __p1) __extension__ ({ \
- uint16x8x4_t __s1 = __p1; \
- __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 49); \
- })
- #else
- #define vst4q_u16(__p0, __p1) __extension__ ({ \
- uint16x8x4_t __s1 = __p1; \
- uint16x8x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 49); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4q_s8(__p0, __p1) __extension__ ({ \
- int8x16x4_t __s1 = __p1; \
- __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 32); \
- })
- #else
- #define vst4q_s8(__p0, __p1) __extension__ ({ \
- int8x16x4_t __s1 = __p1; \
- int8x16x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 32); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4q_f32(__p0, __p1) __extension__ ({ \
- float32x4x4_t __s1 = __p1; \
- __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 41); \
- })
- #else
- #define vst4q_f32(__p0, __p1) __extension__ ({ \
- float32x4x4_t __s1 = __p1; \
- float32x4x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
- __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 41); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4q_f16(__p0, __p1) __extension__ ({ \
- float16x8x4_t __s1 = __p1; \
- __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 40); \
- })
- #else
- #define vst4q_f16(__p0, __p1) __extension__ ({ \
- float16x8x4_t __s1 = __p1; \
- float16x8x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 40); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4q_s32(__p0, __p1) __extension__ ({ \
- int32x4x4_t __s1 = __p1; \
- __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 34); \
- })
- #else
- #define vst4q_s32(__p0, __p1) __extension__ ({ \
- int32x4x4_t __s1 = __p1; \
- int32x4x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
- __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 34); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4q_s16(__p0, __p1) __extension__ ({ \
- int16x8x4_t __s1 = __p1; \
- __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 33); \
- })
- #else
- #define vst4q_s16(__p0, __p1) __extension__ ({ \
- int16x8x4_t __s1 = __p1; \
- int16x8x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 33); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4_u8(__p0, __p1) __extension__ ({ \
- uint8x8x4_t __s1 = __p1; \
- __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 16); \
- })
- #else
- #define vst4_u8(__p0, __p1) __extension__ ({ \
- uint8x8x4_t __s1 = __p1; \
- uint8x8x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 16); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4_u32(__p0, __p1) __extension__ ({ \
- uint32x2x4_t __s1 = __p1; \
- __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 18); \
- })
- #else
- #define vst4_u32(__p0, __p1) __extension__ ({ \
- uint32x2x4_t __s1 = __p1; \
- uint32x2x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
- __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 18); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4_u64(__p0, __p1) __extension__ ({ \
- uint64x1x4_t __s1 = __p1; \
- __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \
- })
- #else
- #define vst4_u64(__p0, __p1) __extension__ ({ \
- uint64x1x4_t __s1 = __p1; \
- __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4_u16(__p0, __p1) __extension__ ({ \
- uint16x4x4_t __s1 = __p1; \
- __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 17); \
- })
- #else
- #define vst4_u16(__p0, __p1) __extension__ ({ \
- uint16x4x4_t __s1 = __p1; \
- uint16x4x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
- __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 17); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4_s8(__p0, __p1) __extension__ ({ \
- int8x8x4_t __s1 = __p1; \
- __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 0); \
- })
- #else
- #define vst4_s8(__p0, __p1) __extension__ ({ \
- int8x8x4_t __s1 = __p1; \
- int8x8x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 0); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4_f32(__p0, __p1) __extension__ ({ \
- float32x2x4_t __s1 = __p1; \
- __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 9); \
- })
- #else
- #define vst4_f32(__p0, __p1) __extension__ ({ \
- float32x2x4_t __s1 = __p1; \
- float32x2x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
- __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 9); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4_f16(__p0, __p1) __extension__ ({ \
- float16x4x4_t __s1 = __p1; \
- __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 8); \
- })
- #else
- #define vst4_f16(__p0, __p1) __extension__ ({ \
- float16x4x4_t __s1 = __p1; \
- float16x4x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
- __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 8); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4_s32(__p0, __p1) __extension__ ({ \
- int32x2x4_t __s1 = __p1; \
- __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 2); \
- })
- #else
- #define vst4_s32(__p0, __p1) __extension__ ({ \
- int32x2x4_t __s1 = __p1; \
- int32x2x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
- __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 2); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4_s64(__p0, __p1) __extension__ ({ \
- int64x1x4_t __s1 = __p1; \
- __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 3); \
- })
- #else
- #define vst4_s64(__p0, __p1) __extension__ ({ \
- int64x1x4_t __s1 = __p1; \
- __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 3); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4_s16(__p0, __p1) __extension__ ({ \
- int16x4x4_t __s1 = __p1; \
- __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 1); \
- })
- #else
- #define vst4_s16(__p0, __p1) __extension__ ({ \
- int16x4x4_t __s1 = __p1; \
- int16x4x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
- __builtin_neon_vst4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 1); \
- })
- #endif
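The four-register stores above complete the sequence; the vst4_lane_* variants that follow store a single element from each of the four registers instead of whole vectors. A hedged sketch of the q-form vst4q_u8 defined above, again with hypothetical names:

#include <arm_neon.h>

/* Interleaves 16 pixels of planar R, G, B, A into 64 bytes at dst:
   r[0], g[0], b[0], a[0], r[1], ... */
static void store_rgba16(uint8_t *dst, uint8x16_t r, uint8x16_t g,
                         uint8x16_t b, uint8x16_t a) {
  uint8x16x4_t rgba = { { r, g, b, a } };
  vst4q_u8(dst, rgba);
}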
- #ifdef __LITTLE_ENDIAN__
- #define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x8x4_t __s1 = __p1; \
- __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \
- })
- #else
- #define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x8x4_t __s1 = __p1; \
- poly8x8x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x4x4_t __s1 = __p1; \
- __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \
- })
- #else
- #define vst4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x4x4_t __s1 = __p1; \
- poly16x4x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
- __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x8x4_t __s1 = __p1; \
- __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 37); \
- })
- #else
- #define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
- poly16x8x4_t __s1 = __p1; \
- poly16x8x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x4x4_t __s1 = __p1; \
- __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 50); \
- })
- #else
- #define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x4x4_t __s1 = __p1; \
- uint32x4x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
- __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x8x4_t __s1 = __p1; \
- __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \
- })
- #else
- #define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x8x4_t __s1 = __p1; \
- uint16x8x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x4x4_t __s1 = __p1; \
- __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 41); \
- })
- #else
- #define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x4x4_t __s1 = __p1; \
- float32x4x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
- __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 41); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x8x4_t __s1 = __p1; \
- __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 40); \
- })
- #else
- #define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x8x4_t __s1 = __p1; \
- float16x8x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 40); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4x4_t __s1 = __p1; \
- __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 34); \
- })
- #else
- #define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4x4_t __s1 = __p1; \
- int32x4x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
- __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 34); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8x4_t __s1 = __p1; \
- __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 33); \
- })
- #else
- #define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8x4_t __s1 = __p1; \
- int16x8x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 33); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x8x4_t __s1 = __p1; \
- __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 16); \
- })
- #else
- #define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x8x4_t __s1 = __p1; \
- uint8x8x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x2x4_t __s1 = __p1; \
- __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 18); \
- })
- #else
- #define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x2x4_t __s1 = __p1; \
- uint32x2x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
- __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x4x4_t __s1 = __p1; \
- __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 17); \
- })
- #else
- #define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x4x4_t __s1 = __p1; \
- uint16x4x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
- __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x8x4_t __s1 = __p1; \
- __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \
- })
- #else
- #define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x8x4_t __s1 = __p1; \
- int8x8x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x2x4_t __s1 = __p1; \
- __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 9); \
- })
- #else
- #define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x2x4_t __s1 = __p1; \
- float32x2x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
- __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 9); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x4x4_t __s1 = __p1; \
- __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 8); \
- })
- #else
- #define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x4x4_t __s1 = __p1; \
- float16x4x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
- __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 8); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2x4_t __s1 = __p1; \
- __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 2); \
- })
- #else
- #define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2x4_t __s1 = __p1; \
- int32x2x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
- __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 2); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4x4_t __s1 = __p1; \
- __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 1); \
- })
- #else
- #define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4x4_t __s1 = __p1; \
- int16x4x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
- __builtin_neon_vst4_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 1); \
- })
- #endif
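- // vsubq_<type> / vsub_<type>: lane-wise subtraction, __p0 - __p1. The big-endian
- // bodies reverse the operand lanes, subtract, and reverse the result back; the
- // single-lane 64-bit variants need no reversal, so both branches are identical.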
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __ret;
- __ret = __p0 - __p1;
- return __ret;
- }
- #else
- __ai uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = __rev0 - __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __ret;
- __ret = __p0 - __p1;
- return __ret;
- }
- #else
- __ai uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __rev0 - __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __ret;
- __ret = __p0 - __p1;
- return __ret;
- }
- #else
- __ai uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = __rev0 - __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __ret;
- __ret = __p0 - __p1;
- return __ret;
- }
- #else
- __ai uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __rev0 - __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __ret;
- __ret = __p0 - __p1;
- return __ret;
- }
- #else
- __ai int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = __rev0 - __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __ret;
- __ret = __p0 - __p1;
- return __ret;
- }
- #else
- __ai float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = __rev0 - __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __ret;
- __ret = __p0 - __p1;
- return __ret;
- }
- #else
- __ai int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __rev0 - __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __ret;
- __ret = __p0 - __p1;
- return __ret;
- }
- #else
- __ai int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int64x2_t __ret;
- __ret = __rev0 - __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __ret;
- __ret = __p0 - __p1;
- return __ret;
- }
- #else
- __ai int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __rev0 - __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = __p0 - __p1;
- return __ret;
- }
- #else
- __ai uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = __rev0 - __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __ret;
- __ret = __p0 - __p1;
- return __ret;
- }
- #else
- __ai uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = __rev0 - __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vsub_u64(uint64x1_t __p0, uint64x1_t __p1) {
- uint64x1_t __ret;
- __ret = __p0 - __p1;
- return __ret;
- }
- #else
- __ai uint64x1_t vsub_u64(uint64x1_t __p0, uint64x1_t __p1) {
- uint64x1_t __ret;
- __ret = __p0 - __p1;
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __ret;
- __ret = __p0 - __p1;
- return __ret;
- }
- #else
- __ai uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = __rev0 - __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __ret;
- __ret = __p0 - __p1;
- return __ret;
- }
- #else
- __ai int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = __rev0 - __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __ret;
- __ret = __p0 - __p1;
- return __ret;
- }
- #else
- __ai float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float32x2_t __ret;
- __ret = __rev0 - __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __ret;
- __ret = __p0 - __p1;
- return __ret;
- }
- #else
- __ai int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __ret;
- __ret = __rev0 - __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vsub_s64(int64x1_t __p0, int64x1_t __p1) {
- int64x1_t __ret;
- __ret = __p0 - __p1;
- return __ret;
- }
- #else
- __ai int64x1_t vsub_s64(int64x1_t __p0, int64x1_t __p1) {
- int64x1_t __ret;
- __ret = __p0 - __p1;
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __ret;
- __ret = __p0 - __p1;
- return __ret;
- }
- #else
- __ai int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = __rev0 - __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
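- // vsubhn_<type>: subtract, then narrow by keeping the most significant half of each
- // result lane. The __noswap_ helpers perform the same operation without lane reversal
- // so that other big-endian wrappers can call them on already-reversed operands.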
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai uint16x4_t __noswap_vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai uint32x2_t __noswap_vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai uint8x8_t __noswap_vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
- return __ret;
- }
- #else
- __ai int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai int16x4_t __noswap_vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai int32x2_t __noswap_vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai int8x8_t __noswap_vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
- return __ret;
- }
- #endif
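- // vsubl_<type>: widen both narrow operands to double-width lanes (vmovl), then subtract.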
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint16x8_t __ret;
- __ret = vmovl_u8(__p0) - vmovl_u8(__p1);
- return __ret;
- }
- #else
- __ai uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __noswap_vmovl_u8(__rev0) - __noswap_vmovl_u8(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint64x2_t __ret;
- __ret = vmovl_u32(__p0) - vmovl_u32(__p1);
- return __ret;
- }
- #else
- __ai uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = __noswap_vmovl_u32(__rev0) - __noswap_vmovl_u32(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint32x4_t __ret;
- __ret = vmovl_u16(__p0) - vmovl_u16(__p1);
- return __ret;
- }
- #else
- __ai uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __noswap_vmovl_u16(__rev0) - __noswap_vmovl_u16(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) {
- int16x8_t __ret;
- __ret = vmovl_s8(__p0) - vmovl_s8(__p1);
- return __ret;
- }
- #else
- __ai int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __noswap_vmovl_s8(__rev0) - __noswap_vmovl_s8(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) {
- int64x2_t __ret;
- __ret = vmovl_s32(__p0) - vmovl_s32(__p1);
- return __ret;
- }
- #else
- __ai int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int64x2_t __ret;
- __ret = __noswap_vmovl_s32(__rev0) - __noswap_vmovl_s32(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) {
- int32x4_t __ret;
- __ret = vmovl_s16(__p0) - vmovl_s16(__p1);
- return __ret;
- }
- #else
- __ai int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __noswap_vmovl_s16(__rev0) - __noswap_vmovl_s16(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
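- // vsubw_<type>: subtract the widened narrow operand (vmovl(__p1)) from the wide operand __p0.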
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) {
- uint16x8_t __ret;
- __ret = __p0 - vmovl_u8(__p1);
- return __ret;
- }
- #else
- __ai uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __rev0 - __noswap_vmovl_u8(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) {
- uint64x2_t __ret;
- __ret = __p0 - vmovl_u32(__p1);
- return __ret;
- }
- #else
- __ai uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = __rev0 - __noswap_vmovl_u32(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) {
- uint32x4_t __ret;
- __ret = __p0 - vmovl_u16(__p1);
- return __ret;
- }
- #else
- __ai uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __rev0 - __noswap_vmovl_u16(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) {
- int16x8_t __ret;
- __ret = __p0 - vmovl_s8(__p1);
- return __ret;
- }
- #else
- __ai int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __rev0 - __noswap_vmovl_s8(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) {
- int64x2_t __ret;
- __ret = __p0 - vmovl_s32(__p1);
- return __ret;
- }
- #else
- __ai int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int64x2_t __ret;
- __ret = __rev0 - __noswap_vmovl_s32(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) {
- int32x4_t __ret;
- __ret = __p0 - vmovl_s16(__p1);
- return __ret;
- }
- #else
- __ai int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __rev0 - __noswap_vmovl_s16(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
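- // vtbl1..vtbl4: byte table lookup. Each result byte is the table byte selected by the
- // corresponding index in the final operand; indices past the end of the table yield 0.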
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) {
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 4);
- return __ret;
- }
- #else
- __ai poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) {
- poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) {
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 4);
- return __ret;
- }
- #else
- __ai poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) {
- poly8x8x2_t __rev0;
- __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 4);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) {
- uint8x8x2_t __rev0;
- __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) {
- int8x8x2_t __rev0;
- __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) {
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 4);
- return __ret;
- }
- #else
- __ai poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) {
- poly8x8x3_t __rev0;
- __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 4);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) {
- uint8x8x3_t __rev0;
- __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) {
- int8x8x3_t __rev0;
- __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) {
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 4);
- return __ret;
- }
- #else
- __ai poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) {
- poly8x8x4_t __rev0;
- __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 4);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) {
- uint8x8x4_t __rev0;
- __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) {
- int8x8x4_t __rev0;
- __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
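- // vtbx1..vtbx4: table lookup with extension. Like vtbl, but an out-of-range index
- // leaves the corresponding byte of the first operand (__p0) unchanged in the result.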
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) {
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4);
- return __ret;
- }
- #else
- __ai poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) {
- poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) {
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 4);
- return __ret;
- }
- #else
- __ai poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) {
- poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8x2_t __rev1;
- __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 4);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8x2_t __rev1;
- __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8x2_t __rev1;
- __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) {
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 4);
- return __ret;
- }
- #else
- __ai poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) {
- poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8x3_t __rev1;
- __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 4);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8x3_t __rev1;
- __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8x3_t __rev1;
- __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) {
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 4);
- return __ret;
- }
- #else
- __ai poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) {
- poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8x4_t __rev1;
- __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 4);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8x4_t __rev1;
- __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8x4_t __rev1;
- __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
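- // vtrn / vtrnq: transpose pairs of lanes from the two inputs. val[0] interleaves the
- // even-numbered lanes of __p0 and __p1, val[1] interleaves the odd-numbered lanes.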
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) {
- poly8x8x2_t __ret;
- __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4);
- return __ret;
- }
- #else
- __ai poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) {
- poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8x2_t __ret;
- __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) {
- poly16x4x2_t __ret;
- __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5);
- return __ret;
- }
- #else
- __ai poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) {
- poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- poly16x4x2_t __ret;
- __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) {
- poly8x16x2_t __ret;
- __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36);
- return __ret;
- }
- #else
- __ai poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) {
- poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16x2_t __ret;
- __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) {
- poly16x8x2_t __ret;
- __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37);
- return __ret;
- }
- #else
- __ai poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) {
- poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- poly16x8x2_t __ret;
- __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16x2_t __ret;
- __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48);
- return __ret;
- }
- #else
- __ai uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16x2_t __ret;
- __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4x2_t __ret;
- __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50);
- return __ret;
- }
- #else
- __ai uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4x2_t __ret;
- __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8x2_t __ret;
- __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49);
- return __ret;
- }
- #else
- __ai uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8x2_t __ret;
- __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16x2_t __ret;
- __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32);
- return __ret;
- }
- #else
- __ai int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16x2_t __ret;
- __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4x2_t __ret;
- __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41);
- return __ret;
- }
- #else
- __ai float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float32x4x2_t __ret;
- __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4x2_t __ret;
- __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34);
- return __ret;
- }
- #else
- __ai int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4x2_t __ret;
- __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8x2_t __ret;
- __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33);
- return __ret;
- }
- #else
- __ai int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8x2_t __ret;
- __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8x2_t __ret;
- __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16);
- return __ret;
- }
- #else
- __ai uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8x2_t __ret;
- __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2x2_t __ret;
- __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18);
- return __ret;
- }
- #else
- __ai uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2x2_t __ret;
- __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4x2_t __ret;
- __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17);
- return __ret;
- }
- #else
- __ai uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4x2_t __ret;
- __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8x2_t __ret;
- __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0);
- return __ret;
- }
- #else
- __ai int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8x2_t __ret;
- __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2x2_t __ret;
- __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9);
- return __ret;
- }
- #else
- __ai float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float32x2x2_t __ret;
- __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2x2_t __ret;
- __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2);
- return __ret;
- }
- #else
- __ai int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2x2_t __ret;
- __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4x2_t __ret;
- __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1);
- return __ret;
- }
- #else
- __ai int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4x2_t __ret;
- __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) {
- poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) {
- poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) {
- poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) {
- poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) {
- poly8x8x2_t __ret;
- __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4);
- return __ret;
- }
- #else
- __ai poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) {
- poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8x2_t __ret;
- __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) {
- poly16x4x2_t __ret;
- __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5);
- return __ret;
- }
- #else
- __ai poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) {
- poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- poly16x4x2_t __ret;
- __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) {
- poly8x16x2_t __ret;
- __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36);
- return __ret;
- }
- #else
- __ai poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) {
- poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16x2_t __ret;
- __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) {
- poly16x8x2_t __ret;
- __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37);
- return __ret;
- }
- #else
- __ai poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) {
- poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- poly16x8x2_t __ret;
- __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16x2_t __ret;
- __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48);
- return __ret;
- }
- #else
- __ai uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16x2_t __ret;
- __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4x2_t __ret;
- __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50);
- return __ret;
- }
- #else
- __ai uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4x2_t __ret;
- __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8x2_t __ret;
- __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49);
- return __ret;
- }
- #else
- __ai uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8x2_t __ret;
- __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16x2_t __ret;
- __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32);
- return __ret;
- }
- #else
- __ai int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16x2_t __ret;
- __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4x2_t __ret;
- __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41);
- return __ret;
- }
- #else
- __ai float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float32x4x2_t __ret;
- __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4x2_t __ret;
- __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34);
- return __ret;
- }
- #else
- __ai int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4x2_t __ret;
- __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8x2_t __ret;
- __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33);
- return __ret;
- }
- #else
- __ai int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8x2_t __ret;
- __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8x2_t __ret;
- __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16);
- return __ret;
- }
- #else
- __ai uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8x2_t __ret;
- __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2x2_t __ret;
- __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18);
- return __ret;
- }
- #else
- __ai uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2x2_t __ret;
- __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4x2_t __ret;
- __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17);
- return __ret;
- }
- #else
- __ai uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4x2_t __ret;
- __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8x2_t __ret;
- __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0);
- return __ret;
- }
- #else
- __ai int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8x2_t __ret;
- __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2x2_t __ret;
- __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9);
- return __ret;
- }
- #else
- __ai float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float32x2x2_t __ret;
- __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2x2_t __ret;
- __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2);
- return __ret;
- }
- #else
- __ai int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2x2_t __ret;
- __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4x2_t __ret;
- __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1);
- return __ret;
- }
- #else
- __ai int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4x2_t __ret;
- __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) {
- poly8x8x2_t __ret;
- __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4);
- return __ret;
- }
- #else
- __ai poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) {
- poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8x2_t __ret;
- __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) {
- poly16x4x2_t __ret;
- __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5);
- return __ret;
- }
- #else
- __ai poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) {
- poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- poly16x4x2_t __ret;
- __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) {
- poly8x16x2_t __ret;
- __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36);
- return __ret;
- }
- #else
- __ai poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) {
- poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16x2_t __ret;
- __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) {
- poly16x8x2_t __ret;
- __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37);
- return __ret;
- }
- #else
- __ai poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) {
- poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- poly16x8x2_t __ret;
- __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16x2_t __ret;
- __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48);
- return __ret;
- }
- #else
- __ai uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16x2_t __ret;
- __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4x2_t __ret;
- __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50);
- return __ret;
- }
- #else
- __ai uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4x2_t __ret;
- __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8x2_t __ret;
- __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49);
- return __ret;
- }
- #else
- __ai uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8x2_t __ret;
- __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16x2_t __ret;
- __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32);
- return __ret;
- }
- #else
- __ai int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16x2_t __ret;
- __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4x2_t __ret;
- __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41);
- return __ret;
- }
- #else
- __ai float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float32x4x2_t __ret;
- __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4x2_t __ret;
- __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34);
- return __ret;
- }
- #else
- __ai int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4x2_t __ret;
- __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8x2_t __ret;
- __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33);
- return __ret;
- }
- #else
- __ai int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8x2_t __ret;
- __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8x2_t __ret;
- __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16);
- return __ret;
- }
- #else
- __ai uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8x2_t __ret;
- __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2x2_t __ret;
- __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18);
- return __ret;
- }
- #else
- __ai uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2x2_t __ret;
- __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4x2_t __ret;
- __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17);
- return __ret;
- }
- #else
- __ai uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4x2_t __ret;
- __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8x2_t __ret;
- __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0);
- return __ret;
- }
- #else
- __ai int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8x2_t __ret;
- __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2x2_t __ret;
- __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9);
- return __ret;
- }
- #else
- __ai float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float32x2x2_t __ret;
- __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2x2_t __ret;
- __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2);
- return __ret;
- }
- #else
- __ai int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2x2_t __ret;
- __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4x2_t __ret;
- __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1);
- return __ret;
- }
- #else
- __ai int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4x2_t __ret;
- __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #if !defined(__aarch64__)
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #endif
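All of the q-form vreinterpret casts above are pure bit-pattern reinterpretations: the __LITTLE_ENDIAN__ and big-endian definitions are byte-for-byte identical because no lanes are shuffled, only the vector's type changes. A minimal usage sketch in C (an illustration, not part of the header; it assumes a NEON-capable target, and vdupq_n_s16 / vgetq_lane_u8 are standard <arm_neon.h> helpers that do not appear in this excerpt):

#include <arm_neon.h>
#include <stdio.h>

int main(void) {
    int16x8_t s = vdupq_n_s16(-1);                  /* eight s16 lanes, every bit set          */
    uint8x16_t u = vreinterpretq_u8_s16(s);         /* same 128 bits, now viewed as 16 x uint8 */
    printf("%u\n", (unsigned)vgetq_lane_u8(u, 0));  /* prints 255; no data was moved           */
    return 0;
}

The 64-bit d-register variants that follow (vreinterpret_u8_p8 and friends) behave the same way on half-width vectors.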
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #endif
- #endif
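A minimal usage sketch for the vreinterpret block above (illustrative only, not part of the generated header; assumes a translation unit that includes <arm_neon.h>). It shows that vreinterpret merely relabels the 64-bit register contents as another lane type of the same total size; no lanes are converted, moved, or rounded.

#include <arm_neon.h>

/* 0x3C00 is the IEEE-754 half-precision bit pattern for 1.0.  The
 * reinterpret keeps those exact bits and only changes the static type. */
float16x4_t ones_as_f16(void) {
  uint16x4_t bits = vdup_n_u16(0x3C00);
  return vreinterpret_f16_u16(bits);
}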
- #if (__ARM_FP & 2)
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vcvt_f16_f32(float32x4_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__p0, 8);
- return __ret;
- }
- #else
- __ai float16x4_t vcvt_f16_f32(float32x4_t __p0) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__rev0, 8);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai float16x4_t __noswap_vcvt_f16_f32(float32x4_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__p0, 8);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vcvt_f32_f16(float16x4_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 41);
- return __ret;
- }
- #else
- __ai float32x4_t vcvt_f32_f16(float16x4_t __p0) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__rev0, 41);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai float32x4_t __noswap_vcvt_f32_f16(float16x4_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 41);
- return __ret;
- }
- #endif
- #endif
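A short sketch of the half-precision conversions defined just above (illustrative only; assumes a target where (__ARM_FP & 2) holds so vcvt_f16_f32/vcvt_f32_f16 are available). Unlike vreinterpret, these intrinsics change the encoding, so values that do not fit float16 are rounded, and out-of-range magnitudes become infinities.

#include <arm_neon.h>

/* Narrow four floats to half precision and widen them back. */
float32x4_t round_trip_f16(float32x4_t v) {
  float16x4_t h = vcvt_f16_f32(v);
  return vcvt_f32_f16(h);
}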
- #if __ARM_ARCH >= 8
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vcvtaq_s32_f32(float32x4_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__p0, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vcvtaq_s32_f32(float32x4_t __p0) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__rev0, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vcvta_s32_f32(float32x2_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__p0, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vcvta_s32_f32(float32x2_t __p0) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__rev0, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__p0, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__rev0, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vcvta_u32_f32(float32x2_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__p0, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vcvta_u32_f32(float32x2_t __p0) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__rev0, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vcvtmq_s32_f32(float32x4_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__p0, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vcvtmq_s32_f32(float32x4_t __p0) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__rev0, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vcvtm_s32_f32(float32x2_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__p0, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vcvtm_s32_f32(float32x2_t __p0) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__rev0, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vcvtmq_u32_v((int8x16_t)__p0, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vcvtmq_u32_v((int8x16_t)__rev0, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vcvtm_u32_f32(float32x2_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__p0, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vcvtm_u32_f32(float32x2_t __p0) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__rev0, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vcvtnq_s32_f32(float32x4_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vcvtnq_s32_v((int8x16_t)__p0, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vcvtnq_s32_f32(float32x4_t __p0) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vcvtnq_s32_v((int8x16_t)__rev0, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vcvtn_s32_f32(float32x2_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__p0, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vcvtn_s32_f32(float32x2_t __p0) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__rev0, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vcvtnq_u32_v((int8x16_t)__p0, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vcvtnq_u32_v((int8x16_t)__rev0, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vcvtn_u32_f32(float32x2_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__p0, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vcvtn_u32_f32(float32x2_t __p0) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__rev0, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vcvtpq_s32_f32(float32x4_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__p0, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vcvtpq_s32_f32(float32x4_t __p0) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__rev0, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vcvtp_s32_f32(float32x2_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__p0, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vcvtp_s32_f32(float32x2_t __p0) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__rev0, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__p0, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__rev0, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vcvtp_u32_f32(float32x2_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__p0, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vcvtp_u32_f32(float32x2_t __p0) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__rev0, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #endif
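The four float-to-integer families above differ only in their rounding mode, encoded in the suffix letter: a = to nearest with ties away from zero, m = toward minus infinity, n = to nearest with ties to even, p = toward plus infinity. A small sketch (illustrative only; assumes __ARM_ARCH >= 8 and <arm_neon.h>):

#include <arm_neon.h>

/* For an input lane of 2.5f the four conversions give:
 *   vcvtaq_s32_f32 -> 3   (ties away from zero)
 *   vcvtmq_s32_f32 -> 2   (toward minus infinity)
 *   vcvtnq_s32_f32 -> 2   (ties to even)
 *   vcvtpq_s32_f32 -> 3   (toward plus infinity) */
void rounding_demo(int32x4_t out[4]) {
  float32x4_t v = vdupq_n_f32(2.5f);
  out[0] = vcvtaq_s32_f32(v);
  out[1] = vcvtmq_s32_f32(v);
  out[2] = vcvtnq_s32_f32(v);
  out[3] = vcvtpq_s32_f32(v);
}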
- #if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING)
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vrndq_f32(float32x4_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 41);
- return __ret;
- }
- #else
- __ai float32x4_t vrndq_f32(float32x4_t __p0) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 41);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vrnd_f32(float32x2_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 9);
- return __ret;
- }
- #else
- __ai float32x2_t vrnd_f32(float32x2_t __p0) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__rev0, 9);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vrndaq_f32(float32x4_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 41);
- return __ret;
- }
- #else
- __ai float32x4_t vrndaq_f32(float32x4_t __p0) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 41);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vrnda_f32(float32x2_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 9);
- return __ret;
- }
- #else
- __ai float32x2_t vrnda_f32(float32x2_t __p0) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__rev0, 9);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vrndmq_f32(float32x4_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 41);
- return __ret;
- }
- #else
- __ai float32x4_t vrndmq_f32(float32x4_t __p0) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 41);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vrndm_f32(float32x2_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 9);
- return __ret;
- }
- #else
- __ai float32x2_t vrndm_f32(float32x2_t __p0) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__rev0, 9);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vrndnq_f32(float32x4_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 41);
- return __ret;
- }
- #else
- __ai float32x4_t vrndnq_f32(float32x4_t __p0) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 41);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vrndn_f32(float32x2_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 9);
- return __ret;
- }
- #else
- __ai float32x2_t vrndn_f32(float32x2_t __p0) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vrndn_v((int8x8_t)__rev0, 9);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vrndpq_f32(float32x4_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 41);
- return __ret;
- }
- #else
- __ai float32x4_t vrndpq_f32(float32x4_t __p0) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 41);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vrndp_f32(float32x2_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 9);
- return __ret;
- }
- #else
- __ai float32x2_t vrndp_f32(float32x2_t __p0) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__rev0, 9);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vrndxq_f32(float32x4_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 41);
- return __ret;
- }
- #else
- __ai float32x4_t vrndxq_f32(float32x4_t __p0) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 41);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vrndx_f32(float32x2_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 9);
- return __ret;
- }
- #else
- __ai float32x2_t vrndx_f32(float32x2_t __p0) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__rev0, 9);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #endif
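The vrnd family above applies the same rounding-mode letters as the vcvt variants, but float to float, plus a plain vrnd (toward zero) and vrndx (round using the current FPSCR/FPCR mode, which may raise the inexact exception). A sketch under the block's own guard (__ARM_ARCH >= 8 with __ARM_FEATURE_DIRECTED_ROUNDING), illustrative only:

#include <arm_neon.h>

/* For an input lane of -1.5f:
 *   vrndq_f32  -> -1.0  (toward zero)
 *   vrndaq_f32 -> -2.0  (ties away from zero)
 *   vrndmq_f32 -> -2.0  (toward minus infinity)
 *   vrndnq_f32 -> -2.0  (ties to even)
 *   vrndpq_f32 -> -1.0  (toward plus infinity) */
float32x4_t floor_q(float32x4_t v) {
  return vrndmq_f32(v);   /* vectorised floor */
}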
- #if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_NUMERIC_MAXMIN)
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
- return __ret;
- }
- #else
- __ai float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
- return __ret;
- }
- #else
- __ai float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
- return __ret;
- }
- #else
- __ai float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
- return __ret;
- }
- #else
- __ai float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #endif
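vmaxnm/vminnm above follow the IEEE 754-2008 maxNum/minNum rule: when exactly one operand of a lane is a quiet NaN, the numeric operand is returned, whereas plain vmax_f32/vmin_f32 would yield NaN for that lane. A sketch (illustrative only; assumes __ARM_FEATURE_NUMERIC_MAXMIN and <arm_neon.h>):

#include <arm_neon.h>
#include <math.h>

/* Lane-wise maxNum: the NaN in each pair is ignored. */
float32x2_t max_ignoring_nan(void) {
  float32x2_t a = {NAN, 1.0f};
  float32x2_t b = {2.0f, NAN};
  return vmaxnm_f32(a, b);   /* result lanes: {2.0f, 1.0f} */
}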
- #if __ARM_ARCH >= 8 && defined(__aarch64__)
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__p0, 35);
- return __ret;
- }
- #else
- __ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__rev0, 35);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vcvta_s64_f64(float64x1_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t) __builtin_neon_vcvta_s64_v((int8x8_t)__p0, 3);
- return __ret;
- }
- #else
- __ai int64x1_t vcvta_s64_f64(float64x1_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t) __builtin_neon_vcvta_s64_v((int8x8_t)__p0, 3);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__p0, 51);
- return __ret;
- }
- #else
- __ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__rev0, 51);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vcvta_u64_f64(float64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vcvta_u64_v((int8x8_t)__p0, 19);
- return __ret;
- }
- #else
- __ai uint64x1_t vcvta_u64_f64(float64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vcvta_u64_v((int8x8_t)__p0, 19);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__p0, 35);
- return __ret;
- }
- #else
- __ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__rev0, 35);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vcvtm_s64_f64(float64x1_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t) __builtin_neon_vcvtm_s64_v((int8x8_t)__p0, 3);
- return __ret;
- }
- #else
- __ai int64x1_t vcvtm_s64_f64(float64x1_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t) __builtin_neon_vcvtm_s64_v((int8x8_t)__p0, 3);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__p0, 51);
- return __ret;
- }
- #else
- __ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__rev0, 51);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vcvtm_u64_f64(float64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vcvtm_u64_v((int8x8_t)__p0, 19);
- return __ret;
- }
- #else
- __ai uint64x1_t vcvtm_u64_f64(float64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vcvtm_u64_v((int8x8_t)__p0, 19);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__p0, 35);
- return __ret;
- }
- #else
- __ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__rev0, 35);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vcvtn_s64_f64(float64x1_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t) __builtin_neon_vcvtn_s64_v((int8x8_t)__p0, 3);
- return __ret;
- }
- #else
- __ai int64x1_t vcvtn_s64_f64(float64x1_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t) __builtin_neon_vcvtn_s64_v((int8x8_t)__p0, 3);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__p0, 51);
- return __ret;
- }
- #else
- __ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__rev0, 51);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vcvtn_u64_f64(float64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vcvtn_u64_v((int8x8_t)__p0, 19);
- return __ret;
- }
- #else
- __ai uint64x1_t vcvtn_u64_f64(float64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vcvtn_u64_v((int8x8_t)__p0, 19);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__p0, 35);
- return __ret;
- }
- #else
- __ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__rev0, 35);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vcvtp_s64_f64(float64x1_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t) __builtin_neon_vcvtp_s64_v((int8x8_t)__p0, 3);
- return __ret;
- }
- #else
- __ai int64x1_t vcvtp_s64_f64(float64x1_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t) __builtin_neon_vcvtp_s64_v((int8x8_t)__p0, 3);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__p0, 51);
- return __ret;
- }
- #else
- __ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__rev0, 51);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vcvtp_u64_f64(float64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vcvtp_u64_v((int8x8_t)__p0, 19);
- return __ret;
- }
- #else
- __ai uint64x1_t vcvtp_u64_f64(float64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vcvtp_u64_v((int8x8_t)__p0, 19);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vreinterpret_p8_p64(poly64x1_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x8_t vreinterpret_p8_p64(poly64x1_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vreinterpret_p8_f64(float64x1_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x8_t vreinterpret_p8_f64(float64x1_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x1_t vreinterpret_p64_p8(poly8x8_t __p0) {
- poly64x1_t __ret;
- __ret = (poly64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai poly64x1_t vreinterpret_p64_p8(poly8x8_t __p0) {
- poly64x1_t __ret;
- __ret = (poly64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x1_t vreinterpret_p64_p16(poly16x4_t __p0) {
- poly64x1_t __ret;
- __ret = (poly64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai poly64x1_t vreinterpret_p64_p16(poly16x4_t __p0) {
- poly64x1_t __ret;
- __ret = (poly64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x1_t vreinterpret_p64_u8(uint8x8_t __p0) {
- poly64x1_t __ret;
- __ret = (poly64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai poly64x1_t vreinterpret_p64_u8(uint8x8_t __p0) {
- poly64x1_t __ret;
- __ret = (poly64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x1_t vreinterpret_p64_u32(uint32x2_t __p0) {
- poly64x1_t __ret;
- __ret = (poly64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai poly64x1_t vreinterpret_p64_u32(uint32x2_t __p0) {
- poly64x1_t __ret;
- __ret = (poly64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x1_t vreinterpret_p64_u64(uint64x1_t __p0) {
- poly64x1_t __ret;
- __ret = (poly64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai poly64x1_t vreinterpret_p64_u64(uint64x1_t __p0) {
- poly64x1_t __ret;
- __ret = (poly64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x1_t vreinterpret_p64_u16(uint16x4_t __p0) {
- poly64x1_t __ret;
- __ret = (poly64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai poly64x1_t vreinterpret_p64_u16(uint16x4_t __p0) {
- poly64x1_t __ret;
- __ret = (poly64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x1_t vreinterpret_p64_s8(int8x8_t __p0) {
- poly64x1_t __ret;
- __ret = (poly64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai poly64x1_t vreinterpret_p64_s8(int8x8_t __p0) {
- poly64x1_t __ret;
- __ret = (poly64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x1_t vreinterpret_p64_f64(float64x1_t __p0) {
- poly64x1_t __ret;
- __ret = (poly64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai poly64x1_t vreinterpret_p64_f64(float64x1_t __p0) {
- poly64x1_t __ret;
- __ret = (poly64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x1_t vreinterpret_p64_f32(float32x2_t __p0) {
- poly64x1_t __ret;
- __ret = (poly64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai poly64x1_t vreinterpret_p64_f32(float32x2_t __p0) {
- poly64x1_t __ret;
- __ret = (poly64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x1_t vreinterpret_p64_f16(float16x4_t __p0) {
- poly64x1_t __ret;
- __ret = (poly64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai poly64x1_t vreinterpret_p64_f16(float16x4_t __p0) {
- poly64x1_t __ret;
- __ret = (poly64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x1_t vreinterpret_p64_s32(int32x2_t __p0) {
- poly64x1_t __ret;
- __ret = (poly64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai poly64x1_t vreinterpret_p64_s32(int32x2_t __p0) {
- poly64x1_t __ret;
- __ret = (poly64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x1_t vreinterpret_p64_s64(int64x1_t __p0) {
- poly64x1_t __ret;
- __ret = (poly64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai poly64x1_t vreinterpret_p64_s64(int64x1_t __p0) {
- poly64x1_t __ret;
- __ret = (poly64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x1_t vreinterpret_p64_s16(int16x4_t __p0) {
- poly64x1_t __ret;
- __ret = (poly64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai poly64x1_t vreinterpret_p64_s16(int16x4_t __p0) {
- poly64x1_t __ret;
- __ret = (poly64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4_t vreinterpret_p16_p64(poly64x1_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x4_t vreinterpret_p16_p64(poly64x1_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4_t vreinterpret_p16_f64(float64x1_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x4_t vreinterpret_p16_f64(float64x1_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) {
- poly16x4_t __ret;
- __ret = (poly16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vreinterpretq_p8_p128(poly128_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x16_t vreinterpretq_p8_p128(poly128_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vreinterpretq_p8_p64(poly64x2_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x16_t vreinterpretq_p8_p64(poly64x2_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vreinterpretq_p8_f64(float64x2_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x16_t vreinterpretq_p8_f64(float64x2_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly128_t vreinterpretq_p128_p8(poly8x16_t __p0) {
- poly128_t __ret;
- __ret = (poly128_t)(__p0);
- return __ret;
- }
- #else
- __ai poly128_t vreinterpretq_p128_p8(poly8x16_t __p0) {
- poly128_t __ret;
- __ret = (poly128_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly128_t vreinterpretq_p128_p64(poly64x2_t __p0) {
- poly128_t __ret;
- __ret = (poly128_t)(__p0);
- return __ret;
- }
- #else
- __ai poly128_t vreinterpretq_p128_p64(poly64x2_t __p0) {
- poly128_t __ret;
- __ret = (poly128_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly128_t vreinterpretq_p128_p16(poly16x8_t __p0) {
- poly128_t __ret;
- __ret = (poly128_t)(__p0);
- return __ret;
- }
- #else
- __ai poly128_t vreinterpretq_p128_p16(poly16x8_t __p0) {
- poly128_t __ret;
- __ret = (poly128_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly128_t vreinterpretq_p128_u8(uint8x16_t __p0) {
- poly128_t __ret;
- __ret = (poly128_t)(__p0);
- return __ret;
- }
- #else
- __ai poly128_t vreinterpretq_p128_u8(uint8x16_t __p0) {
- poly128_t __ret;
- __ret = (poly128_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly128_t vreinterpretq_p128_u32(uint32x4_t __p0) {
- poly128_t __ret;
- __ret = (poly128_t)(__p0);
- return __ret;
- }
- #else
- __ai poly128_t vreinterpretq_p128_u32(uint32x4_t __p0) {
- poly128_t __ret;
- __ret = (poly128_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly128_t vreinterpretq_p128_u64(uint64x2_t __p0) {
- poly128_t __ret;
- __ret = (poly128_t)(__p0);
- return __ret;
- }
- #else
- __ai poly128_t vreinterpretq_p128_u64(uint64x2_t __p0) {
- poly128_t __ret;
- __ret = (poly128_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly128_t vreinterpretq_p128_u16(uint16x8_t __p0) {
- poly128_t __ret;
- __ret = (poly128_t)(__p0);
- return __ret;
- }
- #else
- __ai poly128_t vreinterpretq_p128_u16(uint16x8_t __p0) {
- poly128_t __ret;
- __ret = (poly128_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly128_t vreinterpretq_p128_s8(int8x16_t __p0) {
- poly128_t __ret;
- __ret = (poly128_t)(__p0);
- return __ret;
- }
- #else
- __ai poly128_t vreinterpretq_p128_s8(int8x16_t __p0) {
- poly128_t __ret;
- __ret = (poly128_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly128_t vreinterpretq_p128_f64(float64x2_t __p0) {
- poly128_t __ret;
- __ret = (poly128_t)(__p0);
- return __ret;
- }
- #else
- __ai poly128_t vreinterpretq_p128_f64(float64x2_t __p0) {
- poly128_t __ret;
- __ret = (poly128_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly128_t vreinterpretq_p128_f32(float32x4_t __p0) {
- poly128_t __ret;
- __ret = (poly128_t)(__p0);
- return __ret;
- }
- #else
- __ai poly128_t vreinterpretq_p128_f32(float32x4_t __p0) {
- poly128_t __ret;
- __ret = (poly128_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly128_t vreinterpretq_p128_f16(float16x8_t __p0) {
- poly128_t __ret;
- __ret = (poly128_t)(__p0);
- return __ret;
- }
- #else
- __ai poly128_t vreinterpretq_p128_f16(float16x8_t __p0) {
- poly128_t __ret;
- __ret = (poly128_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly128_t vreinterpretq_p128_s32(int32x4_t __p0) {
- poly128_t __ret;
- __ret = (poly128_t)(__p0);
- return __ret;
- }
- #else
- __ai poly128_t vreinterpretq_p128_s32(int32x4_t __p0) {
- poly128_t __ret;
- __ret = (poly128_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly128_t vreinterpretq_p128_s64(int64x2_t __p0) {
- poly128_t __ret;
- __ret = (poly128_t)(__p0);
- return __ret;
- }
- #else
- __ai poly128_t vreinterpretq_p128_s64(int64x2_t __p0) {
- poly128_t __ret;
- __ret = (poly128_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly128_t vreinterpretq_p128_s16(int16x8_t __p0) {
- poly128_t __ret;
- __ret = (poly128_t)(__p0);
- return __ret;
- }
- #else
- __ai poly128_t vreinterpretq_p128_s16(int16x8_t __p0) {
- poly128_t __ret;
- __ret = (poly128_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x2_t vreinterpretq_p64_p8(poly8x16_t __p0) {
- poly64x2_t __ret;
- __ret = (poly64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai poly64x2_t vreinterpretq_p64_p8(poly8x16_t __p0) {
- poly64x2_t __ret;
- __ret = (poly64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x2_t vreinterpretq_p64_p128(poly128_t __p0) {
- poly64x2_t __ret;
- __ret = (poly64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai poly64x2_t vreinterpretq_p64_p128(poly128_t __p0) {
- poly64x2_t __ret;
- __ret = (poly64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x2_t vreinterpretq_p64_p16(poly16x8_t __p0) {
- poly64x2_t __ret;
- __ret = (poly64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai poly64x2_t vreinterpretq_p64_p16(poly16x8_t __p0) {
- poly64x2_t __ret;
- __ret = (poly64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x2_t vreinterpretq_p64_u8(uint8x16_t __p0) {
- poly64x2_t __ret;
- __ret = (poly64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai poly64x2_t vreinterpretq_p64_u8(uint8x16_t __p0) {
- poly64x2_t __ret;
- __ret = (poly64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x2_t vreinterpretq_p64_u32(uint32x4_t __p0) {
- poly64x2_t __ret;
- __ret = (poly64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai poly64x2_t vreinterpretq_p64_u32(uint32x4_t __p0) {
- poly64x2_t __ret;
- __ret = (poly64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x2_t vreinterpretq_p64_u64(uint64x2_t __p0) {
- poly64x2_t __ret;
- __ret = (poly64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai poly64x2_t vreinterpretq_p64_u64(uint64x2_t __p0) {
- poly64x2_t __ret;
- __ret = (poly64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x2_t vreinterpretq_p64_u16(uint16x8_t __p0) {
- poly64x2_t __ret;
- __ret = (poly64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai poly64x2_t vreinterpretq_p64_u16(uint16x8_t __p0) {
- poly64x2_t __ret;
- __ret = (poly64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x2_t vreinterpretq_p64_s8(int8x16_t __p0) {
- poly64x2_t __ret;
- __ret = (poly64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai poly64x2_t vreinterpretq_p64_s8(int8x16_t __p0) {
- poly64x2_t __ret;
- __ret = (poly64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x2_t vreinterpretq_p64_f64(float64x2_t __p0) {
- poly64x2_t __ret;
- __ret = (poly64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai poly64x2_t vreinterpretq_p64_f64(float64x2_t __p0) {
- poly64x2_t __ret;
- __ret = (poly64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x2_t vreinterpretq_p64_f32(float32x4_t __p0) {
- poly64x2_t __ret;
- __ret = (poly64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai poly64x2_t vreinterpretq_p64_f32(float32x4_t __p0) {
- poly64x2_t __ret;
- __ret = (poly64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x2_t vreinterpretq_p64_f16(float16x8_t __p0) {
- poly64x2_t __ret;
- __ret = (poly64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai poly64x2_t vreinterpretq_p64_f16(float16x8_t __p0) {
- poly64x2_t __ret;
- __ret = (poly64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x2_t vreinterpretq_p64_s32(int32x4_t __p0) {
- poly64x2_t __ret;
- __ret = (poly64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai poly64x2_t vreinterpretq_p64_s32(int32x4_t __p0) {
- poly64x2_t __ret;
- __ret = (poly64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x2_t vreinterpretq_p64_s64(int64x2_t __p0) {
- poly64x2_t __ret;
- __ret = (poly64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai poly64x2_t vreinterpretq_p64_s64(int64x2_t __p0) {
- poly64x2_t __ret;
- __ret = (poly64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x2_t vreinterpretq_p64_s16(int16x8_t __p0) {
- poly64x2_t __ret;
- __ret = (poly64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai poly64x2_t vreinterpretq_p64_s16(int16x8_t __p0) {
- poly64x2_t __ret;
- __ret = (poly64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8_t vreinterpretq_p16_p128(poly128_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x8_t vreinterpretq_p16_p128(poly128_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8_t vreinterpretq_p16_p64(poly64x2_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x8_t vreinterpretq_p16_p64(poly64x2_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8_t vreinterpretq_p16_f64(float64x2_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x8_t vreinterpretq_p16_f64(float64x2_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) {
- poly16x8_t __ret;
- __ret = (poly16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vreinterpretq_u8_p128(poly128_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x16_t vreinterpretq_u8_p128(poly128_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vreinterpretq_u8_p64(poly64x2_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x16_t vreinterpretq_u8_p64(poly64x2_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vreinterpretq_u8_f64(float64x2_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x16_t vreinterpretq_u8_f64(float64x2_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vreinterpretq_u32_p128(poly128_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x4_t vreinterpretq_u32_p128(poly128_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vreinterpretq_u32_p64(poly64x2_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x4_t vreinterpretq_u32_p64(poly64x2_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vreinterpretq_u32_f64(float64x2_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x4_t vreinterpretq_u32_f64(float64x2_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vreinterpretq_u64_p128(poly128_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x2_t vreinterpretq_u64_p128(poly128_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vreinterpretq_u64_p64(poly64x2_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x2_t vreinterpretq_u64_p64(poly64x2_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vreinterpretq_u64_f64(float64x2_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x2_t vreinterpretq_u64_f64(float64x2_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vreinterpretq_u16_p128(poly128_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x8_t vreinterpretq_u16_p128(poly128_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vreinterpretq_u16_p64(poly64x2_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x8_t vreinterpretq_u16_p64(poly64x2_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vreinterpretq_u16_f64(float64x2_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x8_t vreinterpretq_u16_f64(float64x2_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vreinterpretq_s8_p128(poly128_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x16_t vreinterpretq_s8_p128(poly128_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vreinterpretq_s8_p64(poly64x2_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x16_t vreinterpretq_s8_p64(poly64x2_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vreinterpretq_s8_f64(float64x2_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x16_t vreinterpretq_s8_f64(float64x2_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vreinterpretq_f64_p8(poly8x16_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai float64x2_t vreinterpretq_f64_p8(poly8x16_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vreinterpretq_f64_p128(poly128_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai float64x2_t vreinterpretq_f64_p128(poly128_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vreinterpretq_f64_p64(poly64x2_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai float64x2_t vreinterpretq_f64_p64(poly64x2_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vreinterpretq_f64_p16(poly16x8_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai float64x2_t vreinterpretq_f64_p16(poly16x8_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vreinterpretq_f64_u8(uint8x16_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai float64x2_t vreinterpretq_f64_u8(uint8x16_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vreinterpretq_f64_u32(uint32x4_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai float64x2_t vreinterpretq_f64_u32(uint32x4_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vreinterpretq_f64_u64(uint64x2_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai float64x2_t vreinterpretq_f64_u64(uint64x2_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vreinterpretq_f64_u16(uint16x8_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai float64x2_t vreinterpretq_f64_u16(uint16x8_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vreinterpretq_f64_s8(int8x16_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai float64x2_t vreinterpretq_f64_s8(int8x16_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vreinterpretq_f64_f32(float32x4_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai float64x2_t vreinterpretq_f64_f32(float32x4_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vreinterpretq_f64_f16(float16x8_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai float64x2_t vreinterpretq_f64_f16(float16x8_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vreinterpretq_f64_s32(int32x4_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai float64x2_t vreinterpretq_f64_s32(int32x4_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vreinterpretq_f64_s64(int64x2_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai float64x2_t vreinterpretq_f64_s64(int64x2_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vreinterpretq_f64_s16(int16x8_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai float64x2_t vreinterpretq_f64_s16(int16x8_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vreinterpretq_f32_p128(poly128_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x4_t vreinterpretq_f32_p128(poly128_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vreinterpretq_f32_p64(poly64x2_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x4_t vreinterpretq_f32_p64(poly64x2_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vreinterpretq_f32_f64(float64x2_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x4_t vreinterpretq_f32_f64(float64x2_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vreinterpretq_f16_p128(poly128_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x8_t vreinterpretq_f16_p128(poly128_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vreinterpretq_f16_p64(poly64x2_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x8_t vreinterpretq_f16_p64(poly64x2_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vreinterpretq_f16_f64(float64x2_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x8_t vreinterpretq_f16_f64(float64x2_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vreinterpretq_s32_p128(poly128_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x4_t vreinterpretq_s32_p128(poly128_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vreinterpretq_s32_p64(poly64x2_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x4_t vreinterpretq_s32_p64(poly64x2_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vreinterpretq_s32_f64(float64x2_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x4_t vreinterpretq_s32_f64(float64x2_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vreinterpretq_s64_p128(poly128_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x2_t vreinterpretq_s64_p128(poly128_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vreinterpretq_s64_p64(poly64x2_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x2_t vreinterpretq_s64_p64(poly64x2_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vreinterpretq_s64_f64(float64x2_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x2_t vreinterpretq_s64_f64(float64x2_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vreinterpretq_s16_p128(poly128_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x8_t vreinterpretq_s16_p128(poly128_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vreinterpretq_s16_p64(poly64x2_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x8_t vreinterpretq_s16_p64(poly64x2_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vreinterpretq_s16_f64(float64x2_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x8_t vreinterpretq_s16_f64(float64x2_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vreinterpret_u8_p64(poly64x1_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x8_t vreinterpret_u8_p64(poly64x1_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vreinterpret_u8_f64(float64x1_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x8_t vreinterpret_u8_f64(float64x1_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vreinterpret_u32_p64(poly64x1_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x2_t vreinterpret_u32_p64(poly64x1_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vreinterpret_u32_f64(float64x1_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x2_t vreinterpret_u32_f64(float64x1_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vreinterpret_u64_p64(poly64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x1_t vreinterpret_u64_p64(poly64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vreinterpret_u64_f64(float64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x1_t vreinterpret_u64_f64(float64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vreinterpret_u16_p64(poly64x1_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x4_t vreinterpret_u16_p64(poly64x1_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vreinterpret_u16_f64(float64x1_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x4_t vreinterpret_u16_f64(float64x1_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vreinterpret_s8_p64(poly64x1_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x8_t vreinterpret_s8_p64(poly64x1_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vreinterpret_s8_f64(float64x1_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x8_t vreinterpret_s8_f64(float64x1_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #else
- __ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vreinterpret_f64_p8(poly8x8_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai float64x1_t vreinterpret_f64_p8(poly8x8_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vreinterpret_f64_p64(poly64x1_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai float64x1_t vreinterpret_f64_p64(poly64x1_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vreinterpret_f64_p16(poly16x4_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai float64x1_t vreinterpret_f64_p16(poly16x4_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vreinterpret_f64_u8(uint8x8_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai float64x1_t vreinterpret_f64_u8(uint8x8_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vreinterpret_f64_u32(uint32x2_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai float64x1_t vreinterpret_f64_u32(uint32x2_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vreinterpret_f64_u64(uint64x1_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai float64x1_t vreinterpret_f64_u64(uint64x1_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vreinterpret_f64_u16(uint16x4_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai float64x1_t vreinterpret_f64_u16(uint16x4_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vreinterpret_f64_s8(int8x8_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai float64x1_t vreinterpret_f64_s8(int8x8_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vreinterpret_f64_f32(float32x2_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai float64x1_t vreinterpret_f64_f32(float32x2_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vreinterpret_f64_f16(float16x4_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai float64x1_t vreinterpret_f64_f16(float16x4_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vreinterpret_f64_s32(int32x2_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai float64x1_t vreinterpret_f64_s32(int32x2_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vreinterpret_f64_s64(int64x1_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai float64x1_t vreinterpret_f64_s64(int64x1_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vreinterpret_f64_s16(int16x4_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai float64x1_t vreinterpret_f64_s16(int16x4_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vreinterpret_f32_p64(poly64x1_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x2_t vreinterpret_f32_p64(poly64x1_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vreinterpret_f32_f64(float64x1_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x2_t vreinterpret_f32_f64(float64x1_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vreinterpret_f16_p64(poly64x1_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x4_t vreinterpret_f16_p64(poly64x1_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vreinterpret_f16_f64(float64x1_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x4_t vreinterpret_f16_f64(float64x1_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vreinterpret_s32_p64(poly64x1_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x2_t vreinterpret_s32_p64(poly64x1_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vreinterpret_s32_f64(float64x1_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x2_t vreinterpret_s32_f64(float64x1_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #else
- __ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) {
- int32x2_t __ret;
- __ret = (int32x2_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vreinterpret_s64_p64(poly64x1_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x1_t vreinterpret_s64_p64(poly64x1_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vreinterpret_s64_f64(float64x1_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x1_t vreinterpret_s64_f64(float64x1_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vreinterpret_s16_p64(poly64x1_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x4_t vreinterpret_s16_p64(poly64x1_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vreinterpret_s16_f64(float64x1_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x4_t vreinterpret_s16_f64(float64x1_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #else
- __ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t)(__p0);
- return __ret;
- }
- #endif
- #endif
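- /* AArch64 directed-rounding intrinsics (vrnd, vrnda, vrndi, vrndm, vrndn, vrndp, vrndx) for float64 vectors (plus vrndi for float32); each big-endian Q-form reverses the lane order around the builtin call. */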
- #if __ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_DIRECTED_ROUNDING)
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vrndq_f64(float64x2_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 42);
- return __ret;
- }
- #else
- __ai float64x2_t vrndq_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 42);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vrnd_f64(float64x1_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 10);
- return __ret;
- }
- #else
- __ai float64x1_t vrnd_f64(float64x1_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 10);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vrndaq_f64(float64x2_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 42);
- return __ret;
- }
- #else
- __ai float64x2_t vrndaq_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 42);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vrnda_f64(float64x1_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 10);
- return __ret;
- }
- #else
- __ai float64x1_t vrnda_f64(float64x1_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 10);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vrndiq_f64(float64x2_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 42);
- return __ret;
- }
- #else
- __ai float64x2_t vrndiq_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 42);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vrndiq_f32(float32x4_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 41);
- return __ret;
- }
- #else
- __ai float32x4_t vrndiq_f32(float32x4_t __p0) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 41);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vrndi_f64(float64x1_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 10);
- return __ret;
- }
- #else
- __ai float64x1_t vrndi_f64(float64x1_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 10);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vrndi_f32(float32x2_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 9);
- return __ret;
- }
- #else
- __ai float32x2_t vrndi_f32(float32x2_t __p0) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__rev0, 9);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vrndmq_f64(float64x2_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 42);
- return __ret;
- }
- #else
- __ai float64x2_t vrndmq_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 42);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vrndm_f64(float64x1_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 10);
- return __ret;
- }
- #else
- __ai float64x1_t vrndm_f64(float64x1_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 10);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vrndnq_f64(float64x2_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 42);
- return __ret;
- }
- #else
- __ai float64x2_t vrndnq_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 42);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vrndn_f64(float64x1_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 10);
- return __ret;
- }
- #else
- __ai float64x1_t vrndn_f64(float64x1_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 10);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vrndpq_f64(float64x2_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 42);
- return __ret;
- }
- #else
- __ai float64x2_t vrndpq_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 42);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vrndp_f64(float64x1_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 10);
- return __ret;
- }
- #else
- __ai float64x1_t vrndp_f64(float64x1_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 10);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vrndxq_f64(float64x2_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 42);
- return __ret;
- }
- #else
- __ai float64x2_t vrndxq_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 42);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vrndx_f64(float64x1_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 10);
- return __ret;
- }
- #else
- __ai float64x1_t vrndx_f64(float64x1_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 10);
- return __ret;
- }
- #endif
- #endif
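- /* AArch64 numeric max/min intrinsics (vmaxnm, vminnm) for float64: IEEE 754-2008 maxNum/minNum semantics, preferring a numeric operand over NaN. */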
- #if __ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_NUMERIC_MAXMIN)
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
- return __ret;
- }
- #else
- __ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vmaxnm_f64(float64x1_t __p0, float64x1_t __p1) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
- return __ret;
- }
- #else
- __ai float64x1_t vmaxnm_f64(float64x1_t __p0, float64x1_t __p1) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
- return __ret;
- }
- #else
- __ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vminnm_f64(float64x1_t __p0, float64x1_t __p1) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
- return __ret;
- }
- #else
- __ai float64x1_t vminnm_f64(float64x1_t __p0, float64x1_t __p1) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
- return __ret;
- }
- #endif
- #endif
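- /* Crypto extension intrinsics: AES single-round encrypt/decrypt and (inverse) mix-columns (vaese, vaesd, vaesmc, vaesimc) plus SHA-1 and SHA-256 hash-update operations (vsha1*, vsha256*). */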
- #if defined(__ARM_FEATURE_CRYPTO)
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vaesdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vaesdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vaeseq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vaeseq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vaesimcq_u8(uint8x16_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vaesimcq_v((int8x16_t)__p0, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vaesimcq_u8(uint8x16_t __p0) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vaesimcq_v((int8x16_t)__rev0, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vaesmcq_u8(uint8x16_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vaesmcq_v((int8x16_t)__p0, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vaesmcq_u8(uint8x16_t __p0) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vaesmcq_v((int8x16_t)__rev0, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32((int8x16_t)__p0, __p1, (int8x16_t)__p2);
- return __ret;
- }
- #else
- __ai uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32((int8x16_t)__rev0, __p1, (int8x16_t)__rev2);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32_t vsha1h_u32(uint32_t __p0) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vsha1h_u32(__p0);
- return __ret;
- }
- #else
- __ai uint32_t vsha1h_u32(uint32_t __p0) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vsha1h_u32(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32((int8x16_t)__p0, __p1, (int8x16_t)__p2);
- return __ret;
- }
- #else
- __ai uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32((int8x16_t)__rev0, __p1, (int8x16_t)__rev2);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32((int8x16_t)__p0, __p1, (int8x16_t)__p2);
- return __ret;
- }
- #else
- __ai uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32((int8x16_t)__rev0, __p1, (int8x16_t)__rev2);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vsha1su0q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vsha1su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vsha1su1q_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vsha1su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vsha256hq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vsha256hq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vsha256h2q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vsha256h2q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vsha256su0q_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vsha256su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vsha256su1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vsha256su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #endif
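- /* Fused multiply-add intrinsics: vfma computes p0 + p1 * p2 with a single rounding; vfms is expressed as vfma with the second operand negated. The __noswap_ helpers skip lane reversal for use inside other big-endian wrappers. */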
- #if defined(__ARM_FEATURE_FMA)
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
- return __ret;
- }
- #else
- __ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai float32x4_t __noswap_vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
- return __ret;
- }
- #else
- __ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai float32x2_t __noswap_vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
- float32x4_t __ret;
- __ret = vfmaq_f32(__p0, -__p1, __p2);
- return __ret;
- }
- #else
- __ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = __noswap_vfmaq_f32(__rev0, -__rev1, __rev2);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
- float32x2_t __ret;
- __ret = vfma_f32(__p0, -__p1, __p2);
- return __ret;
- }
- #else
- __ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
- float32x2_t __ret;
- __ret = __noswap_vfma_f32(__rev0, -__rev1, __rev2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #endif
- #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__)
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
- return __ret;
- }
- #else
- __ai float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
- return __ret;
- }
- #else
- __ai float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vabsq_f16(float16x8_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 40);
- return __ret;
- }
- #else
- __ai float16x8_t vabsq_f16(float16x8_t __p0) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 40);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vabs_f16(float16x4_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vabs_v((int8x8_t)__p0, 8);
- return __ret;
- }
- #else
- __ai float16x4_t vabs_f16(float16x4_t __p0) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 8);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __ret;
- __ret = __p0 + __p1;
- return __ret;
- }
- #else
- __ai float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = __rev0 + __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __ret;
- __ret = __p0 + __p1;
- return __ret;
- }
- #else
- __ai float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = __rev0 + __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
- return __ret;
- }
- #else
- __ai float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
- return __ret;
- }
- #else
- __ai float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vcage_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vcagt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vcale_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vcalt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0 == __p1);
- return __ret;
- }
- #else
- __ai uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__rev0 == __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0 == __p1);
- return __ret;
- }
- #else
- __ai uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__rev0 == __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vceqzq_f16(float16x8_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vceqzq_f16(float16x8_t __p0) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vceqz_f16(float16x4_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vceqz_f16(float16x4_t __p0) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0 >= __p1);
- return __ret;
- }
- #else
- __ai uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__rev0 >= __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0 >= __p1);
- return __ret;
- }
- #else
- __ai uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__rev0 >= __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vcgezq_f16(float16x8_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vcgezq_f16(float16x8_t __p0) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vcgez_f16(float16x4_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vcgez_f16(float16x4_t __p0) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0 > __p1);
- return __ret;
- }
- #else
- __ai uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__rev0 > __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0 > __p1);
- return __ret;
- }
- #else
- __ai uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__rev0 > __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vcgtzq_f16(float16x8_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vcgtzq_f16(float16x8_t __p0) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vcgtz_f16(float16x4_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vcgtz_f16(float16x4_t __p0) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0 <= __p1);
- return __ret;
- }
- #else
- __ai uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__rev0 <= __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0 <= __p1);
- return __ret;
- }
- #else
- __ai uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__rev0 <= __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vclezq_f16(float16x8_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vclezq_f16(float16x8_t __p0) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vclez_f16(float16x4_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__p0, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vclez_f16(float16x4_t __p0) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__p0 < __p1);
- return __ret;
- }
- #else
- __ai uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__rev0 < __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__p0 < __p1);
- return __ret;
- }
- #else
- __ai uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t)(__rev0 < __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vcltzq_f16(float16x8_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vcltzq_f16(float16x8_t __p0) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vcltz_f16(float16x4_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vcltz_f16(float16x4_t __p0) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vcvtq_f16_u16(uint16x8_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__p0, 49);
- return __ret;
- }
- #else
- __ai float16x8_t vcvtq_f16_u16(uint16x8_t __p0) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__rev0, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vcvtq_f16_s16(int16x8_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__p0, 33);
- return __ret;
- }
- #else
- __ai float16x8_t vcvtq_f16_s16(int16x8_t __p0) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__rev0, 33);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vcvt_f16_u16(uint16x4_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__p0, 17);
- return __ret;
- }
- #else
- __ai float16x4_t vcvt_f16_u16(uint16x4_t __p0) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__rev0, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vcvt_f16_s16(int16x4_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__p0, 1);
- return __ret;
- }
- #else
- __ai float16x4_t vcvt_f16_s16(int16x4_t __p0) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__rev0, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- float16x8_t __ret; \
- __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__s0, __p1, 49); \
- __ret; \
- })
- #else
- #define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x8_t __ret; \
- __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__rev0, __p1, 49); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- float16x8_t __ret; \
- __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__s0, __p1, 33); \
- __ret; \
- })
- #else
- #define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x8_t __ret; \
- __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__rev0, __p1, 33); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- float16x4_t __ret; \
- __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__s0, __p1, 17); \
- __ret; \
- })
- #else
- #define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- float16x4_t __ret; \
- __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__rev0, __p1, 17); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- float16x4_t __ret; \
- __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__s0, __p1, 1); \
- __ret; \
- })
- #else
- #define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- float16x4_t __ret; \
- __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__rev0, __p1, 1); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- int16x8_t __ret; \
- __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_v((int8x16_t)__s0, __p1, 33); \
- __ret; \
- })
- #else
- #define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __ret; \
- __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_v((int8x16_t)__rev0, __p1, 33); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_v((int8x8_t)__s0, __p1, 1); \
- __ret; \
- })
- #else
- #define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x4_t __ret; \
- __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_v((int8x8_t)__rev0, __p1, 1); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- uint16x8_t __ret; \
- __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_v((int8x16_t)__s0, __p1, 49); \
- __ret; \
- })
- #else
- #define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __ret; \
- __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_v((int8x16_t)__rev0, __p1, 49); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_v((int8x8_t)__s0, __p1, 17); \
- __ret; \
- })
- #else
- #define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint16x4_t __ret; \
- __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_v((int8x8_t)__rev0, __p1, 17); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vcvtq_s16_f16(float16x8_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vcvtq_s16_v((int8x16_t)__p0, 33);
- return __ret;
- }
- #else
- __ai int16x8_t vcvtq_s16_f16(float16x8_t __p0) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vcvtq_s16_v((int8x16_t)__rev0, 33);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vcvt_s16_f16(float16x4_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vcvt_s16_v((int8x8_t)__p0, 1);
- return __ret;
- }
- #else
- __ai int16x4_t vcvt_s16_f16(float16x4_t __p0) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vcvt_s16_v((int8x8_t)__rev0, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vcvtq_u16_f16(float16x8_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_v((int8x16_t)__p0, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vcvtq_u16_f16(float16x8_t __p0) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_v((int8x16_t)__rev0, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vcvt_u16_f16(float16x4_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vcvt_u16_v((int8x8_t)__p0, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vcvt_u16_f16(float16x4_t __p0) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vcvt_u16_v((int8x8_t)__rev0, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vcvtaq_s16_f16(float16x8_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_v((int8x16_t)__p0, 33);
- return __ret;
- }
- #else
- __ai int16x8_t vcvtaq_s16_f16(float16x8_t __p0) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_v((int8x16_t)__rev0, 33);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vcvta_s16_f16(float16x4_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vcvta_s16_v((int8x8_t)__p0, 1);
- return __ret;
- }
- #else
- __ai int16x4_t vcvta_s16_f16(float16x4_t __p0) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vcvta_s16_v((int8x8_t)__rev0, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_v((int8x16_t)__p0, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_v((int8x16_t)__rev0, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vcvta_u16_f16(float16x4_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vcvta_u16_v((int8x8_t)__p0, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vcvta_u16_f16(float16x4_t __p0) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vcvta_u16_v((int8x8_t)__rev0, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vcvtmq_s16_f16(float16x8_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_v((int8x16_t)__p0, 33);
- return __ret;
- }
- #else
- __ai int16x8_t vcvtmq_s16_f16(float16x8_t __p0) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_v((int8x16_t)__rev0, 33);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vcvtm_s16_f16(float16x4_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vcvtm_s16_v((int8x8_t)__p0, 1);
- return __ret;
- }
- #else
- __ai int16x4_t vcvtm_s16_f16(float16x4_t __p0) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vcvtm_s16_v((int8x8_t)__rev0, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_v((int8x16_t)__p0, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_v((int8x16_t)__rev0, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vcvtm_u16_f16(float16x4_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_v((int8x8_t)__p0, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vcvtm_u16_f16(float16x4_t __p0) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_v((int8x8_t)__rev0, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vcvtnq_s16_f16(float16x8_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_v((int8x16_t)__p0, 33);
- return __ret;
- }
- #else
- __ai int16x8_t vcvtnq_s16_f16(float16x8_t __p0) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_v((int8x16_t)__rev0, 33);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vcvtn_s16_f16(float16x4_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vcvtn_s16_v((int8x8_t)__p0, 1);
- return __ret;
- }
- #else
- __ai int16x4_t vcvtn_s16_f16(float16x4_t __p0) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vcvtn_s16_v((int8x8_t)__rev0, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_v((int8x16_t)__p0, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_v((int8x16_t)__rev0, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vcvtn_u16_f16(float16x4_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_v((int8x8_t)__p0, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vcvtn_u16_f16(float16x4_t __p0) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_v((int8x8_t)__rev0, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vcvtpq_s16_f16(float16x8_t __p0) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_v((int8x16_t)__p0, 33);
- return __ret;
- }
- #else
- __ai int16x8_t vcvtpq_s16_f16(float16x8_t __p0) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_v((int8x16_t)__rev0, 33);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vcvtp_s16_f16(float16x4_t __p0) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vcvtp_s16_v((int8x8_t)__p0, 1);
- return __ret;
- }
- #else
- __ai int16x4_t vcvtp_s16_f16(float16x4_t __p0) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vcvtp_s16_v((int8x8_t)__rev0, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_v((int8x16_t)__p0, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_v((int8x16_t)__rev0, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vcvtp_u16_f16(float16x4_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_v((int8x8_t)__p0, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vcvtp_u16_f16(float16x4_t __p0) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_v((int8x8_t)__rev0, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __ret;
- __ret = __p0 / __p1;
- return __ret;
- }
- #else
- __ai float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = __rev0 / __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __ret;
- __ret = __p0 / __p1;
- return __ret;
- }
- #else
- __ai float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = __rev0 / __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vduph_lane_f16(__p0, __p1) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16_t __ret; \
- __ret = (float16_t) __builtin_neon_vduph_lane_f16((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vduph_lane_f16(__p0, __p1) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- float16_t __ret; \
- __ret = (float16_t) __builtin_neon_vduph_lane_f16((int8x8_t)__rev0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vduph_laneq_f16(__p0, __p1) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16_t __ret; \
- __ret = (float16_t) __builtin_neon_vduph_laneq_f16((int8x16_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vduph_laneq_f16(__p0, __p1) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16_t __ret; \
- __ret = (float16_t) __builtin_neon_vduph_laneq_f16((int8x16_t)__rev0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vextq_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16x8_t __s1 = __p1; \
- float16x8_t __ret; \
- __ret = (float16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 40); \
- __ret; \
- })
- #else
- #define vextq_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16x8_t __s1 = __p1; \
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x8_t __ret; \
- __ret = (float16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 40); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vext_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16x4_t __s1 = __p1; \
- float16x4_t __ret; \
- __ret = (float16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 8); \
- __ret; \
- })
- #else
- #define vext_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16x4_t __s1 = __p1; \
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- float16x4_t __ret; \
- __ret = (float16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 8); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
- return __ret;
- }
- #else
- __ai float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai float16x8_t __noswap_vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
- return __ret;
- }
- #else
- __ai float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai float16x4_t __noswap_vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
- float16_t __s0 = __p0; \
- float16_t __s1 = __p1; \
- float16x4_t __s2 = __p2; \
- float16_t __ret; \
- __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (int8x8_t)__s2, __p3); \
- __ret; \
- })
- #else
- #define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
- float16_t __s0 = __p0; \
- float16_t __s1 = __p1; \
- float16x4_t __s2 = __p2; \
- float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- float16_t __ret; \
- __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (int8x8_t)__rev2, __p3); \
- __ret; \
- })
- #define __noswap_vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
- float16_t __s0 = __p0; \
- float16_t __s1 = __p1; \
- float16x4_t __s2 = __p2; \
- float16_t __ret; \
- __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (int8x8_t)__s2, __p3); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16x8_t __s1 = __p1; \
- float16x4_t __s2 = __p2; \
- float16x8_t __ret; \
- __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \
- __ret; \
- })
- #else
- #define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16x8_t __s1 = __p1; \
- float16x4_t __s2 = __p2; \
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- float16x8_t __ret; \
- __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 40); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16x8_t __s1 = __p1; \
- float16x4_t __s2 = __p2; \
- float16x8_t __ret; \
- __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16x4_t __s1 = __p1; \
- float16x4_t __s2 = __p2; \
- float16x4_t __ret; \
- __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \
- __ret; \
- })
- #else
- #define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16x4_t __s1 = __p1; \
- float16x4_t __s2 = __p2; \
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- float16x4_t __ret; \
- __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 8); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16x4_t __s1 = __p1; \
- float16x4_t __s2 = __p2; \
- float16x4_t __ret; \
- __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
- float16_t __s0 = __p0; \
- float16_t __s1 = __p1; \
- float16x8_t __s2 = __p2; \
- float16_t __ret; \
- __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (int8x16_t)__s2, __p3); \
- __ret; \
- })
- #else
- #define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
- float16_t __s0 = __p0; \
- float16_t __s1 = __p1; \
- float16x8_t __s2 = __p2; \
- float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16_t __ret; \
- __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (int8x16_t)__rev2, __p3); \
- __ret; \
- })
- #define __noswap_vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
- float16_t __s0 = __p0; \
- float16_t __s1 = __p1; \
- float16x8_t __s2 = __p2; \
- float16_t __ret; \
- __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (int8x16_t)__s2, __p3); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16x8_t __s1 = __p1; \
- float16x8_t __s2 = __p2; \
- float16x8_t __ret; \
- __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \
- __ret; \
- })
- #else
- #define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16x8_t __s1 = __p1; \
- float16x8_t __s2 = __p2; \
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x8_t __ret; \
- __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 40); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16x8_t __s1 = __p1; \
- float16x8_t __s2 = __p2; \
- float16x8_t __ret; \
- __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16x4_t __s1 = __p1; \
- float16x8_t __s2 = __p2; \
- float16x4_t __ret; \
- __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \
- __ret; \
- })
- #else
- #define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16x4_t __s1 = __p1; \
- float16x8_t __s2 = __p2; \
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x4_t __ret; \
- __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 8); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16x4_t __s1 = __p1; \
- float16x8_t __s2 = __p2; \
- float16x4_t __ret; \
- __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16x8_t __s1 = __p1; \
- float16_t __s2 = __p2; \
- float16x8_t __ret; \
- __ret = vfmaq_f16(__s0, __s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
- __ret; \
- })
- #else
- #define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16x8_t __s1 = __p1; \
- float16_t __s2 = __p2; \
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x8_t __ret; \
- __ret = __noswap_vfmaq_f16(__rev0, __rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16x4_t __s1 = __p1; \
- float16_t __s2 = __p2; \
- float16x4_t __ret; \
- __ret = vfma_f16(__s0, __s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
- __ret; \
- })
- #else
- #define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16x4_t __s1 = __p1; \
- float16_t __s2 = __p2; \
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- float16x4_t __ret; \
- __ret = __noswap_vfma_f16(__rev0, __rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
- float16x8_t __ret;
- __ret = vfmaq_f16(__p0, -__p1, __p2);
- return __ret;
- }
- #else
- __ai float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = __noswap_vfmaq_f16(__rev0, -__rev1, __rev2);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
- float16x4_t __ret;
- __ret = vfma_f16(__p0, -__p1, __p2);
- return __ret;
- }
- #else
- __ai float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = __noswap_vfma_f16(__rev0, -__rev1, __rev2);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
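- /*
-  * Editor's note, not part of the generated header: as defined directly
-  * above, vfmsq_f16(a, b, c) expands to vfmaq_f16(a, -b, c), i.e. a fused
-  * multiply-subtract computing a[i] - b[i]*c[i] per lane (the second operand
-  * is the one negated). A hypothetical residual-update sketch:
-  *
-  *   // r = r - w*x for 8 half-precision lanes, with a single rounding step
-  *   static inline float16x8_t residual8(float16x8_t r, float16x8_t w, float16x8_t x) {
-  *     return vfmsq_f16(r, w, x);
-  *   }
-  */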
- #ifdef __LITTLE_ENDIAN__
- #define vfmsh_lane_f16(__p0_0, __p1_0, __p2_0, __p3_0) __extension__ ({ \
- float16_t __s0_0 = __p0_0; \
- float16_t __s1_0 = __p1_0; \
- float16x4_t __s2_0 = __p2_0; \
- float16_t __ret_0; \
- __ret_0 = vfmah_lane_f16(__s0_0, -__s1_0, __s2_0, __p3_0); \
- __ret_0; \
- })
- #else
- #define vfmsh_lane_f16(__p0_1, __p1_1, __p2_1, __p3_1) __extension__ ({ \
- float16_t __s0_1 = __p0_1; \
- float16_t __s1_1 = __p1_1; \
- float16x4_t __s2_1 = __p2_1; \
- float16x4_t __rev2_1; __rev2_1 = __builtin_shufflevector(__s2_1, __s2_1, 3, 2, 1, 0); \
- float16_t __ret_1; \
- __ret_1 = __noswap_vfmah_lane_f16(__s0_1, -__s1_1, __rev2_1, __p3_1); \
- __ret_1; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vfmsq_lane_f16(__p0_2, __p1_2, __p2_2, __p3_2) __extension__ ({ \
- float16x8_t __s0_2 = __p0_2; \
- float16x8_t __s1_2 = __p1_2; \
- float16x4_t __s2_2 = __p2_2; \
- float16x8_t __ret_2; \
- __ret_2 = vfmaq_lane_f16(__s0_2, -__s1_2, __s2_2, __p3_2); \
- __ret_2; \
- })
- #else
- #define vfmsq_lane_f16(__p0_3, __p1_3, __p2_3, __p3_3) __extension__ ({ \
- float16x8_t __s0_3 = __p0_3; \
- float16x8_t __s1_3 = __p1_3; \
- float16x4_t __s2_3 = __p2_3; \
- float16x8_t __rev0_3; __rev0_3 = __builtin_shufflevector(__s0_3, __s0_3, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x8_t __rev1_3; __rev1_3 = __builtin_shufflevector(__s1_3, __s1_3, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x4_t __rev2_3; __rev2_3 = __builtin_shufflevector(__s2_3, __s2_3, 3, 2, 1, 0); \
- float16x8_t __ret_3; \
- __ret_3 = __noswap_vfmaq_lane_f16(__rev0_3, -__rev1_3, __rev2_3, __p3_3); \
- __ret_3 = __builtin_shufflevector(__ret_3, __ret_3, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_3; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vfms_lane_f16(__p0_4, __p1_4, __p2_4, __p3_4) __extension__ ({ \
- float16x4_t __s0_4 = __p0_4; \
- float16x4_t __s1_4 = __p1_4; \
- float16x4_t __s2_4 = __p2_4; \
- float16x4_t __ret_4; \
- __ret_4 = vfma_lane_f16(__s0_4, -__s1_4, __s2_4, __p3_4); \
- __ret_4; \
- })
- #else
- #define vfms_lane_f16(__p0_5, __p1_5, __p2_5, __p3_5) __extension__ ({ \
- float16x4_t __s0_5 = __p0_5; \
- float16x4_t __s1_5 = __p1_5; \
- float16x4_t __s2_5 = __p2_5; \
- float16x4_t __rev0_5; __rev0_5 = __builtin_shufflevector(__s0_5, __s0_5, 3, 2, 1, 0); \
- float16x4_t __rev1_5; __rev1_5 = __builtin_shufflevector(__s1_5, __s1_5, 3, 2, 1, 0); \
- float16x4_t __rev2_5; __rev2_5 = __builtin_shufflevector(__s2_5, __s2_5, 3, 2, 1, 0); \
- float16x4_t __ret_5; \
- __ret_5 = __noswap_vfma_lane_f16(__rev0_5, -__rev1_5, __rev2_5, __p3_5); \
- __ret_5 = __builtin_shufflevector(__ret_5, __ret_5, 3, 2, 1, 0); \
- __ret_5; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vfmsh_laneq_f16(__p0_6, __p1_6, __p2_6, __p3_6) __extension__ ({ \
- float16_t __s0_6 = __p0_6; \
- float16_t __s1_6 = __p1_6; \
- float16x8_t __s2_6 = __p2_6; \
- float16_t __ret_6; \
- __ret_6 = vfmah_laneq_f16(__s0_6, -__s1_6, __s2_6, __p3_6); \
- __ret_6; \
- })
- #else
- #define vfmsh_laneq_f16(__p0_7, __p1_7, __p2_7, __p3_7) __extension__ ({ \
- float16_t __s0_7 = __p0_7; \
- float16_t __s1_7 = __p1_7; \
- float16x8_t __s2_7 = __p2_7; \
- float16x8_t __rev2_7; __rev2_7 = __builtin_shufflevector(__s2_7, __s2_7, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16_t __ret_7; \
- __ret_7 = __noswap_vfmah_laneq_f16(__s0_7, -__s1_7, __rev2_7, __p3_7); \
- __ret_7; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vfmsq_laneq_f16(__p0_8, __p1_8, __p2_8, __p3_8) __extension__ ({ \
- float16x8_t __s0_8 = __p0_8; \
- float16x8_t __s1_8 = __p1_8; \
- float16x8_t __s2_8 = __p2_8; \
- float16x8_t __ret_8; \
- __ret_8 = vfmaq_laneq_f16(__s0_8, -__s1_8, __s2_8, __p3_8); \
- __ret_8; \
- })
- #else
- #define vfmsq_laneq_f16(__p0_9, __p1_9, __p2_9, __p3_9) __extension__ ({ \
- float16x8_t __s0_9 = __p0_9; \
- float16x8_t __s1_9 = __p1_9; \
- float16x8_t __s2_9 = __p2_9; \
- float16x8_t __rev0_9; __rev0_9 = __builtin_shufflevector(__s0_9, __s0_9, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x8_t __rev1_9; __rev1_9 = __builtin_shufflevector(__s1_9, __s1_9, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x8_t __rev2_9; __rev2_9 = __builtin_shufflevector(__s2_9, __s2_9, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x8_t __ret_9; \
- __ret_9 = __noswap_vfmaq_laneq_f16(__rev0_9, -__rev1_9, __rev2_9, __p3_9); \
- __ret_9 = __builtin_shufflevector(__ret_9, __ret_9, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_9; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vfms_laneq_f16(__p0_10, __p1_10, __p2_10, __p3_10) __extension__ ({ \
- float16x4_t __s0_10 = __p0_10; \
- float16x4_t __s1_10 = __p1_10; \
- float16x8_t __s2_10 = __p2_10; \
- float16x4_t __ret_10; \
- __ret_10 = vfma_laneq_f16(__s0_10, -__s1_10, __s2_10, __p3_10); \
- __ret_10; \
- })
- #else
- #define vfms_laneq_f16(__p0_11, __p1_11, __p2_11, __p3_11) __extension__ ({ \
- float16x4_t __s0_11 = __p0_11; \
- float16x4_t __s1_11 = __p1_11; \
- float16x8_t __s2_11 = __p2_11; \
- float16x4_t __rev0_11; __rev0_11 = __builtin_shufflevector(__s0_11, __s0_11, 3, 2, 1, 0); \
- float16x4_t __rev1_11; __rev1_11 = __builtin_shufflevector(__s1_11, __s1_11, 3, 2, 1, 0); \
- float16x8_t __rev2_11; __rev2_11 = __builtin_shufflevector(__s2_11, __s2_11, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x4_t __ret_11; \
- __ret_11 = __noswap_vfma_laneq_f16(__rev0_11, -__rev1_11, __rev2_11, __p3_11); \
- __ret_11 = __builtin_shufflevector(__ret_11, __ret_11, 3, 2, 1, 0); \
- __ret_11; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16x8_t __s1 = __p1; \
- float16_t __s2 = __p2; \
- float16x8_t __ret; \
- __ret = vfmaq_f16(__s0, -__s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
- __ret; \
- })
- #else
- #define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16x8_t __s1 = __p1; \
- float16_t __s2 = __p2; \
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x8_t __ret; \
- __ret = __noswap_vfmaq_f16(__rev0, -__rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16x4_t __s1 = __p1; \
- float16_t __s2 = __p2; \
- float16x4_t __ret; \
- __ret = vfma_f16(__s0, -__s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
- __ret; \
- })
- #else
- #define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16x4_t __s1 = __p1; \
- float16_t __s2 = __p2; \
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- float16x4_t __ret; \
- __ret = __noswap_vfma_f16(__rev0, -__rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
- return __ret;
- }
- #else
- __ai float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
- return __ret;
- }
- #else
- __ai float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
- return __ret;
- }
- #else
- __ai float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
- return __ret;
- }
- #else
- __ai float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
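- /*
-  * Editor's note, not part of the generated header: vmaxq_f16/vmax_f16 map to
-  * FMAX, where a NaN input generally propagates to the result, while the
-  * *maxnm* forms map to FMAXNM and follow the IEEE 754-2008 maxNum rule:
-  * when exactly one operand is a quiet NaN, the numeric operand is returned.
-  * A hypothetical sketch preferring the NaN-suppressing form:
-  *
-  *   // returns b for any lane where a is a quiet NaN
-  *   static inline float16x8_t max_or_default8(float16x8_t a, float16x8_t b) {
-  *     return vmaxnmq_f16(a, b);
-  *   }
-  */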
- #ifdef __LITTLE_ENDIAN__
- #define vmaxnmvq_f16(__p0) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16_t __ret; \
- __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__s0); \
- __ret; \
- })
- #else
- #define vmaxnmvq_f16(__p0) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16_t __ret; \
- __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__rev0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmaxnmv_f16(__p0) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16_t __ret; \
- __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__s0); \
- __ret; \
- })
- #else
- #define vmaxnmv_f16(__p0) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- float16_t __ret; \
- __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__rev0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmaxvq_f16(__p0) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16_t __ret; \
- __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__s0); \
- __ret; \
- })
- #else
- #define vmaxvq_f16(__p0) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16_t __ret; \
- __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__rev0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmaxv_f16(__p0) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16_t __ret; \
- __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__s0); \
- __ret; \
- })
- #else
- #define vmaxv_f16(__p0) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- float16_t __ret; \
- __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__rev0); \
- __ret; \
- })
- #endif
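- /*
-  * Editor's note, not part of the generated header: vmaxvq_f16/vmaxv_f16 are
-  * horizontal reductions that return the maximum across all lanes as a
-  * scalar float16_t; vmaxnmvq_f16/vmaxnmv_f16 are the NaN-suppressing
-  * variants. A hypothetical one-liner:
-  *
-  *   static inline float16_t max_of8(float16x8_t v) {
-  *     return vmaxvq_f16(v);   // maximum of the 8 half-precision lanes
-  *   }
-  */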
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
- return __ret;
- }
- #else
- __ai float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
- return __ret;
- }
- #else
- __ai float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
- return __ret;
- }
- #else
- __ai float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
- return __ret;
- }
- #else
- __ai float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
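- /*
-  * Editor's note, not part of the generated header: combining the maxnm/minnm
-  * forms gives a clamp in which a quiet-NaN input falls back to a bound
-  * rather than poisoning the result. A hypothetical sketch (assumes lo <= hi):
-  *
-  *   static inline float16x8_t clamp8(float16x8_t x, float16x8_t lo, float16x8_t hi) {
-  *     return vminnmq_f16(vmaxnmq_f16(x, lo), hi);   // NaN lanes of x become lo
-  *   }
-  */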
- #ifdef __LITTLE_ENDIAN__
- #define vminnmvq_f16(__p0) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16_t __ret; \
- __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__s0); \
- __ret; \
- })
- #else
- #define vminnmvq_f16(__p0) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16_t __ret; \
- __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__rev0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vminnmv_f16(__p0) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16_t __ret; \
- __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__s0); \
- __ret; \
- })
- #else
- #define vminnmv_f16(__p0) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- float16_t __ret; \
- __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__rev0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vminvq_f16(__p0) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16_t __ret; \
- __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__s0); \
- __ret; \
- })
- #else
- #define vminvq_f16(__p0) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16_t __ret; \
- __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__rev0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vminv_f16(__p0) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16_t __ret; \
- __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__s0); \
- __ret; \
- })
- #else
- #define vminv_f16(__p0) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- float16_t __ret; \
- __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__rev0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __ret;
- __ret = __p0 * __p1;
- return __ret;
- }
- #else
- __ai float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = __rev0 * __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __ret;
- __ret = __p0 * __p1;
- return __ret;
- }
- #else
- __ai float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = __rev0 * __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmulq_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16x4_t __s1 = __p1; \
- float16x8_t __ret; \
- __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
- __ret; \
- })
- #else
- #define vmulq_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16x4_t __s1 = __p1; \
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- float16x8_t __ret; \
- __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmul_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16x4_t __s1 = __p1; \
- float16x4_t __ret; \
- __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
- __ret; \
- })
- #else
- #define vmul_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16x4_t __s1 = __p1; \
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- float16x4_t __ret; \
- __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmulq_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16x8_t __s1 = __p1; \
- float16x8_t __ret; \
- __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
- __ret; \
- })
- #else
- #define vmulq_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16x8_t __s1 = __p1; \
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x8_t __ret; \
- __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmul_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16x8_t __s1 = __p1; \
- float16x4_t __ret; \
- __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
- __ret; \
- })
- #else
- #define vmul_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16x8_t __s1 = __p1; \
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x4_t __ret; \
- __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
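- /*
-  * Editor's note, not part of the generated header: the lane forms above
-  * multiply every lane of the first operand by one selected lane of the
-  * second; the lane index must be a compile-time constant (0-3 for the
-  * 64-bit source, 0-7 for the 128-bit "laneq" source). A hypothetical sketch:
-  *
-  *   // scale all lanes of x by lane 3 of the coefficient vector c
-  *   static inline float16x8_t scale_by_lane3(float16x8_t x, float16x8_t c) {
-  *     return vmulq_laneq_f16(x, c, 3);
-  *   }
-  */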
- #ifdef __LITTLE_ENDIAN__
- #define vmulq_n_f16(__p0, __p1) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16_t __s1 = __p1; \
- float16x8_t __ret; \
- __ret = __s0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \
- __ret; \
- })
- #else
- #define vmulq_n_f16(__p0, __p1) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16_t __s1 = __p1; \
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x8_t __ret; \
- __ret = __rev0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmul_n_f16(__p0, __p1) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16_t __s1 = __p1; \
- float16x4_t __ret; \
- __ret = __s0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \
- __ret; \
- })
- #else
- #define vmul_n_f16(__p0, __p1) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16_t __s1 = __p1; \
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- float16x4_t __ret; \
- __ret = __rev0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
- return __ret;
- }
- #else
- __ai float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai float16x8_t __noswap_vmulxq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
- return __ret;
- }
- #else
- __ai float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai float16x4_t __noswap_vmulx_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
- return __ret;
- }
- #endif
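- /*
-  * Editor's note, not part of the generated header: vmulx maps to FMULX,
-  * which behaves like an ordinary multiply except that (+/-)0 * (+/-)Inf
-  * yields 2.0 with the XOR of the operand signs instead of NaN; that
-  * property is what makes it usable inside reciprocal/step sequences.
-  * Usage is otherwise the same as vmulq_f16, e.g. (hypothetical a, b):
-  *
-  *   float16x8_t y = vmulxq_f16(a, b);
-  */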
- #ifdef __LITTLE_ENDIAN__
- #define vmulxq_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16x4_t __s1 = __p1; \
- float16x8_t __ret; \
- __ret = vmulxq_f16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vmulxq_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16x4_t __s1 = __p1; \
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- float16x8_t __ret; \
- __ret = __noswap_vmulxq_f16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmulx_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16x4_t __s1 = __p1; \
- float16x4_t __ret; \
- __ret = vmulx_f16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vmulx_lane_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16x4_t __s1 = __p1; \
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- float16x4_t __ret; \
- __ret = __noswap_vmulx_f16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmulxq_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16x8_t __s1 = __p1; \
- float16x8_t __ret; \
- __ret = vmulxq_f16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vmulxq_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16x8_t __s1 = __p1; \
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x8_t __ret; \
- __ret = __noswap_vmulxq_f16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmulx_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16x8_t __s1 = __p1; \
- float16x4_t __ret; \
- __ret = vmulx_f16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vmulx_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16x8_t __s1 = __p1; \
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x4_t __ret; \
- __ret = __noswap_vmulx_f16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmulxq_n_f16(__p0, __p1) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16_t __s1 = __p1; \
- float16x8_t __ret; \
- __ret = vmulxq_f16(__s0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \
- __ret; \
- })
- #else
- #define vmulxq_n_f16(__p0, __p1) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16_t __s1 = __p1; \
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x8_t __ret; \
- __ret = __noswap_vmulxq_f16(__rev0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmulx_n_f16(__p0, __p1) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16_t __s1 = __p1; \
- float16x4_t __ret; \
- __ret = vmulx_f16(__s0, (float16x4_t) {__s1, __s1, __s1, __s1}); \
- __ret; \
- })
- #else
- #define vmulx_n_f16(__p0, __p1) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16_t __s1 = __p1; \
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- float16x4_t __ret; \
- __ret = __noswap_vmulx_f16(__rev0, (float16x4_t) {__s1, __s1, __s1, __s1}); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vnegq_f16(float16x8_t __p0) {
- float16x8_t __ret;
- __ret = -__p0;
- return __ret;
- }
- #else
- __ai float16x8_t vnegq_f16(float16x8_t __p0) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = -__rev0;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vneg_f16(float16x4_t __p0) {
- float16x4_t __ret;
- __ret = -__p0;
- return __ret;
- }
- #else
- __ai float16x4_t vneg_f16(float16x4_t __p0) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = -__rev0;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
- return __ret;
- }
- #else
- __ai float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
- return __ret;
- }
- #else
- __ai float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
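- /*
-  * Editor's note, not part of the generated header: vpaddq_f16(a, b) adds
-  * adjacent lane pairs, producing [a0+a1, a2+a3, a4+a5, a6+a7, b0+b1, ...].
-  * Feeding the same vector into both operands halves the number of distinct
-  * partial sums per call, which gives a simple horizontal add. A hypothetical
-  * sketch (vgetq_lane_f16 is defined elsewhere in this header):
-  *
-  *   static inline float16_t sum8(float16x8_t v) {
-  *     float16x8_t s = vpaddq_f16(v, v);   // 8 lanes -> 4 distinct pair sums
-  *     s = vpaddq_f16(s, s);               // 4 -> 2
-  *     s = vpaddq_f16(s, s);               // 2 -> 1; every lane now holds the total
-  *     return vgetq_lane_f16(s, 0);
-  *   }
-  */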
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
- return __ret;
- }
- #else
- __ai float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
- return __ret;
- }
- #else
- __ai float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
- return __ret;
- }
- #else
- __ai float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vpmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
- return __ret;
- }
- #else
- __ai float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vpmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
- return __ret;
- }
- #else
- __ai float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
- return __ret;
- }
- #else
- __ai float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
- return __ret;
- }
- #else
- __ai float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vpminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
- return __ret;
- }
- #else
- __ai float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vpminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vrecpeq_f16(float16x8_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 40);
- return __ret;
- }
- #else
- __ai float16x8_t vrecpeq_f16(float16x8_t __p0) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 40);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vrecpe_f16(float16x4_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 8);
- return __ret;
- }
- #else
- __ai float16x4_t vrecpe_f16(float16x4_t __p0) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 8);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
- return __ret;
- }
- #else
- __ai float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
- return __ret;
- }
- #else
- __ai float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vrecps_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
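- /*
-  * Editor's note, not part of the generated header: vrecpe gives only an
-  * approximate reciprocal; vrecps(a, b) computes 2 - a*b, which is the
-  * Newton-Raphson correction factor, so each multiply-by-vrecps step roughly
-  * doubles the number of correct bits. A hypothetical refinement:
-  *
-  *   static inline float16x8_t recip8(float16x8_t d) {
-  *     float16x8_t r = vrecpeq_f16(d);          // rough estimate of 1/d
-  *     r = vmulq_f16(r, vrecpsq_f16(d, r));     // one Newton-Raphson step
-  *     r = vmulq_f16(r, vrecpsq_f16(d, r));     // optional second step
-  *     return r;
-  *   }
-  */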
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vrev64q_f16(float16x8_t __p0) {
- float16x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
- return __ret;
- }
- #else
- __ai float16x8_t vrev64q_f16(float16x8_t __p0) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vrev64_f16(float16x4_t __p0) {
- float16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- return __ret;
- }
- #else
- __ai float16x4_t vrev64_f16(float16x4_t __p0) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vrndq_f16(float16x8_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 40);
- return __ret;
- }
- #else
- __ai float16x8_t vrndq_f16(float16x8_t __p0) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 40);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vrnd_f16(float16x4_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 8);
- return __ret;
- }
- #else
- __ai float16x4_t vrnd_f16(float16x4_t __p0) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vrnd_v((int8x8_t)__rev0, 8);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vrndaq_f16(float16x8_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 40);
- return __ret;
- }
- #else
- __ai float16x8_t vrndaq_f16(float16x8_t __p0) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 40);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vrnda_f16(float16x4_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 8);
- return __ret;
- }
- #else
- __ai float16x4_t vrnda_f16(float16x4_t __p0) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vrnda_v((int8x8_t)__rev0, 8);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vrndiq_f16(float16x8_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 40);
- return __ret;
- }
- #else
- __ai float16x8_t vrndiq_f16(float16x8_t __p0) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 40);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vrndi_f16(float16x4_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 8);
- return __ret;
- }
- #else
- __ai float16x4_t vrndi_f16(float16x4_t __p0) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vrndi_v((int8x8_t)__rev0, 8);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vrndmq_f16(float16x8_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 40);
- return __ret;
- }
- #else
- __ai float16x8_t vrndmq_f16(float16x8_t __p0) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 40);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vrndm_f16(float16x4_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 8);
- return __ret;
- }
- #else
- __ai float16x4_t vrndm_f16(float16x4_t __p0) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vrndm_v((int8x8_t)__rev0, 8);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vrndnq_f16(float16x8_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 40);
- return __ret;
- }
- #else
- __ai float16x8_t vrndnq_f16(float16x8_t __p0) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 40);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vrndn_f16(float16x4_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 8);
- return __ret;
- }
- #else
- __ai float16x4_t vrndn_f16(float16x4_t __p0) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vrndn_v((int8x8_t)__rev0, 8);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vrndpq_f16(float16x8_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 40);
- return __ret;
- }
- #else
- __ai float16x8_t vrndpq_f16(float16x8_t __p0) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 40);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vrndp_f16(float16x4_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 8);
- return __ret;
- }
- #else
- __ai float16x4_t vrndp_f16(float16x4_t __p0) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vrndp_v((int8x8_t)__rev0, 8);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vrndxq_f16(float16x8_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 40);
- return __ret;
- }
- #else
- __ai float16x8_t vrndxq_f16(float16x8_t __p0) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 40);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vrndx_f16(float16x4_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 8);
- return __ret;
- }
- #else
- __ai float16x4_t vrndx_f16(float16x4_t __p0) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vrndx_v((int8x8_t)__rev0, 8);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
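- /*
-  * Editor's summary of the round-to-integral family above (not part of the
-  * generated header); all operate lane-wise on half-precision values:
-  *
-  *   vrnd[q]_f16   round toward zero              (FRINTZ, like truncf)
-  *   vrnda[q]_f16  to nearest, ties away from 0   (FRINTA, like roundf)
-  *   vrndi[q]_f16  current rounding mode          (FRINTI, like nearbyintf)
-  *   vrndm[q]_f16  toward minus infinity          (FRINTM, like floorf)
-  *   vrndn[q]_f16  to nearest, ties to even       (FRINTN)
-  *   vrndp[q]_f16  toward plus infinity           (FRINTP, like ceilf)
-  *   vrndx[q]_f16  current mode, raises Inexact   (FRINTX, like rintf)
-  */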
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vrsqrteq_f16(float16x8_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 40);
- return __ret;
- }
- #else
- __ai float16x8_t vrsqrteq_f16(float16x8_t __p0) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 40);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vrsqrte_f16(float16x4_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 8);
- return __ret;
- }
- #else
- __ai float16x4_t vrsqrte_f16(float16x4_t __p0) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 8);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
- return __ret;
- }
- #else
- __ai float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
- return __ret;
- }
- #else
- __ai float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vrsqrts_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
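- /*
-  * Editor's note, not part of the generated header: the same refinement
-  * pattern applies to reciprocal square roots, with vrsqrts(a, b) computing
-  * (3 - a*b)/2. A hypothetical sketch:
-  *
-  *   static inline float16x8_t rsqrt8(float16x8_t d) {
-  *     float16x8_t r = vrsqrteq_f16(d);                      // rough 1/sqrt(d)
-  *     r = vmulq_f16(r, vrsqrtsq_f16(vmulq_f16(d, r), r));   // one Newton-Raphson step
-  *     return r;
-  *   }
-  */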
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vsqrtq_f16(float16x8_t __p0) {
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 40);
- return __ret;
- }
- #else
- __ai float16x8_t vsqrtq_f16(float16x8_t __p0) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = (float16x8_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 40);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vsqrt_f16(float16x4_t __p0) {
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 8);
- return __ret;
- }
- #else
- __ai float16x4_t vsqrt_f16(float16x4_t __p0) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = (float16x4_t) __builtin_neon_vsqrt_v((int8x8_t)__rev0, 8);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __ret;
- __ret = __p0 - __p1;
- return __ret;
- }
- #else
- __ai float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = __rev0 - __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __ret;
- __ret = __p0 - __p1;
- return __ret;
- }
- #else
- __ai float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = __rev0 - __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8x2_t __ret;
- __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40);
- return __ret;
- }
- #else
- __ai float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8x2_t __ret;
- __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4x2_t __ret;
- __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8);
- return __ret;
- }
- #else
- __ai float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float16x4x2_t __ret;
- __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
- return __ret;
- }
- #else
- __ai float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
- return __ret;
- }
- #else
- __ai float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
- return __ret;
- }
- #else
- __ai float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
- return __ret;
- }
- #else
- __ai float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
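- /*
-  * Editor's note, not part of the generated header: vtrn1q/vtrn2q interleave
-  * the even-indexed and odd-indexed lanes of their operands respectively,
-  * which is the 2x2-block transpose building block. A hypothetical sketch:
-  *
-  *   static inline void trn_pair(float16x8_t a, float16x8_t b,
-  *                               float16x8_t *lo, float16x8_t *hi) {
-  *     *lo = vtrn1q_f16(a, b);   // a0 b0 a2 b2 a4 b4 a6 b6
-  *     *hi = vtrn2q_f16(a, b);   // a1 b1 a3 b3 a5 b5 a7 b7
-  *   }
-  */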
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8x2_t __ret;
- __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40);
- return __ret;
- }
- #else
- __ai float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8x2_t __ret;
- __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4x2_t __ret;
- __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8);
- return __ret;
- }
- #else
- __ai float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float16x4x2_t __ret;
- __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
- return __ret;
- }
- #else
- __ai float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
- return __ret;
- }
- #else
- __ai float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
- return __ret;
- }
- #else
- __ai float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
- return __ret;
- }
- #else
- __ai float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8x2_t __ret;
- __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40);
- return __ret;
- }
- #else
- __ai float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8x2_t __ret;
- __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4x2_t __ret;
- __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8);
- return __ret;
- }
- #else
- __ai float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float16x4x2_t __ret;
- __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8);
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
- return __ret;
- }
- #else
- __ai float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
- return __ret;
- }
- #else
- __ai float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
- return __ret;
- }
- #else
- __ai float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
- return __ret;
- }
- #else
- __ai float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float16x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #endif
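Editorial usage sketch, not part of the header source above: assuming a target with the FP16 vector-arithmetic extension (and AArch64 for the numbered vzip1q/vzip2q/vuzp1q forms), the permute intrinsics defined above interleave and de-interleave half-precision lanes. The function names below are illustrative only.

    #include <arm_neon.h>

    /* Interleave two f16 vectors: vzip1q_f16 -> a0,b0,a1,b1,a2,b2,a3,b3,
     * vzip2q_f16 -> a4,b4,...,a7,b7 (matches the shuffle indices above). */
    float16x8_t zip_lo(float16x8_t a, float16x8_t b) { return vzip1q_f16(a, b); }
    float16x8_t zip_hi(float16x8_t a, float16x8_t b) { return vzip2q_f16(a, b); }

    /* De-interleave: vuzp1q_f16 keeps even-indexed lanes, vuzp2q_f16 the odd ones. */
    float16x8_t even_lanes(float16x8_t a, float16x8_t b) { return vuzp1q_f16(a, b); }
    float16x8_t odd_lanes(float16x8_t a, float16x8_t b)  { return vuzp2q_f16(a, b); }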
- #if defined(__ARM_FEATURE_QRDMX)
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
- int32x4_t __ret;
- __ret = vqaddq_s32(__p0, vqrdmulhq_s32(__p1, __p2));
- return __ret;
- }
- #else
- __ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __noswap_vqaddq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
- int16x8_t __ret;
- __ret = vqaddq_s16(__p0, vqrdmulhq_s16(__p1, __p2));
- return __ret;
- }
- #else
- __ai int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __noswap_vqaddq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
- int32x2_t __ret;
- __ret = vqadd_s32(__p0, vqrdmulh_s32(__p1, __p2));
- return __ret;
- }
- #else
- __ai int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
- int32x2_t __ret;
- __ret = __noswap_vqadd_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
- int16x4_t __ret;
- __ret = vqadd_s16(__p0, vqrdmulh_s16(__p1, __p2));
- return __ret;
- }
- #else
- __ai int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = __noswap_vqadd_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
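Editorial usage sketch, not part of the header source above: with __ARM_FEATURE_QRDMX available, vqrdmlahq_s32 is exactly what the little-endian definition above spells out, vqaddq_s32(acc, vqrdmulhq_s32(x, gain)), i.e. a saturating rounding doubling multiply-accumulate. A hypothetical Q31 fixed-point accumulate:

    #include <arm_neon.h>

    /* acc + (x * gain) in Q31 arithmetic, with rounding, doubling and saturation. */
    int32x4_t q31_mla(int32x4_t acc, int32x4_t x, int32x4_t gain) {
        return vqrdmlahq_s32(acc, x, gain);
    }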
- #ifdef __LITTLE_ENDIAN__
- #define vqrdmlahq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x2_t __s2 = __p2; \
- int32x4_t __ret; \
- __ret = vqaddq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
- __ret; \
- })
- #else
- #define vqrdmlahq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x2_t __s2 = __p2; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
- int32x4_t __ret; \
- __ret = __noswap_vqaddq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrdmlahq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x4_t __s2 = __p2; \
- int16x8_t __ret; \
- __ret = vqaddq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
- __ret; \
- })
- #else
- #define vqrdmlahq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x4_t __s2 = __p2; \
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- int16x8_t __ret; \
- __ret = __noswap_vqaddq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrdmlah_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x2_t __s2 = __p2; \
- int32x2_t __ret; \
- __ret = vqadd_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \
- __ret; \
- })
- #else
- #define vqrdmlah_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x2_t __s2 = __p2; \
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
- int32x2_t __ret; \
- __ret = __noswap_vqadd_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrdmlah_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x4_t __s2 = __p2; \
- int16x4_t __ret; \
- __ret = vqadd_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
- __ret; \
- })
- #else
- #define vqrdmlah_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x4_t __s2 = __p2; \
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- int16x4_t __ret; \
- __ret = __noswap_vqadd_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
- int32x4_t __ret;
- __ret = vqsubq_s32(__p0, vqrdmulhq_s32(__p1, __p2));
- return __ret;
- }
- #else
- __ai int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __noswap_vqsubq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
- int16x8_t __ret;
- __ret = vqsubq_s16(__p0, vqrdmulhq_s16(__p1, __p2));
- return __ret;
- }
- #else
- __ai int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __noswap_vqsubq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
- int32x2_t __ret;
- __ret = vqsub_s32(__p0, vqrdmulh_s32(__p1, __p2));
- return __ret;
- }
- #else
- __ai int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
- int32x2_t __ret;
- __ret = __noswap_vqsub_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
- int16x4_t __ret;
- __ret = vqsub_s16(__p0, vqrdmulh_s16(__p1, __p2));
- return __ret;
- }
- #else
- __ai int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = __noswap_vqsub_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrdmlshq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x2_t __s2 = __p2; \
- int32x4_t __ret; \
- __ret = vqsubq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
- __ret; \
- })
- #else
- #define vqrdmlshq_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x2_t __s2 = __p2; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
- int32x4_t __ret; \
- __ret = __noswap_vqsubq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrdmlshq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x4_t __s2 = __p2; \
- int16x8_t __ret; \
- __ret = vqsubq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
- __ret; \
- })
- #else
- #define vqrdmlshq_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x4_t __s2 = __p2; \
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- int16x8_t __ret; \
- __ret = __noswap_vqsubq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrdmlsh_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x2_t __s2 = __p2; \
- int32x2_t __ret; \
- __ret = vqsub_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \
- __ret; \
- })
- #else
- #define vqrdmlsh_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x2_t __s2 = __p2; \
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
- int32x2_t __ret; \
- __ret = __noswap_vqsub_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrdmlsh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x4_t __s2 = __p2; \
- int16x4_t __ret; \
- __ret = vqsub_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
- __ret; \
- })
- #else
- #define vqrdmlsh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x4_t __s2 = __p2; \
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- int16x4_t __ret; \
- __ret = __noswap_vqsub_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #endif
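Editorial usage sketch, not part of the header source above: the *_lane_* macros in this block require a compile-time-constant lane index, since it is expanded directly into __builtin_shufflevector. Assuming QRDMX support, a hypothetical per-channel Q15 multiply-subtract could look like:

    #include <arm_neon.h>

    /* acc - (x * coeffs[0]) in Q15; the lane index (0 here) must be a constant. */
    int16x8_t q15_mls_lane0(int16x8_t acc, int16x8_t x, int16x4_t coeffs) {
        return vqrdmlshq_lane_s16(acc, x, coeffs, 0);
    }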
- #if defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)
- #ifdef __LITTLE_ENDIAN__
- #define vqrdmlahq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x4_t __s2 = __p2; \
- int32x4_t __ret; \
- __ret = vqaddq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
- __ret; \
- })
- #else
- #define vqrdmlahq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x4_t __s2 = __p2; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = __noswap_vqaddq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrdmlahq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x8_t __s2 = __p2; \
- int16x8_t __ret; \
- __ret = vqaddq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
- __ret; \
- })
- #else
- #define vqrdmlahq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x8_t __s2 = __p2; \
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __ret; \
- __ret = __noswap_vqaddq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrdmlah_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x4_t __s2 = __p2; \
- int32x2_t __ret; \
- __ret = vqadd_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \
- __ret; \
- })
- #else
- #define vqrdmlah_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x4_t __s2 = __p2; \
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- int32x2_t __ret; \
- __ret = __noswap_vqadd_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrdmlah_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x8_t __s2 = __p2; \
- int16x4_t __ret; \
- __ret = vqadd_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
- __ret; \
- })
- #else
- #define vqrdmlah_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x8_t __s2 = __p2; \
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x4_t __ret; \
- __ret = __noswap_vqadd_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrdmlshq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x4_t __s2 = __p2; \
- int32x4_t __ret; \
- __ret = vqsubq_s32(__s0, vqrdmulhq_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
- __ret; \
- })
- #else
- #define vqrdmlshq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x4_t __s2 = __p2; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = __noswap_vqsubq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrdmlshq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x8_t __s2 = __p2; \
- int16x8_t __ret; \
- __ret = vqsubq_s16(__s0, vqrdmulhq_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
- __ret; \
- })
- #else
- #define vqrdmlshq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x8_t __s2 = __p2; \
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __ret; \
- __ret = __noswap_vqsubq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3))); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrdmlsh_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x4_t __s2 = __p2; \
- int32x2_t __ret; \
- __ret = vqsub_s32(__s0, vqrdmulh_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3))); \
- __ret; \
- })
- #else
- #define vqrdmlsh_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x4_t __s2 = __p2; \
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- int32x2_t __ret; \
- __ret = __noswap_vqsub_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3))); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrdmlsh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x8_t __s2 = __p2; \
- int16x4_t __ret; \
- __ret = vqsub_s16(__s0, vqrdmulh_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3))); \
- __ret; \
- })
- #else
- #define vqrdmlsh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x8_t __s2 = __p2; \
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x4_t __ret; \
- __ret = __noswap_vqsub_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3))); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #endif
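Editorial usage sketch, not part of the header source above: the _laneq_ forms in this AArch64-only block differ from the _lane_ forms only in taking a 128-bit vector as the per-lane operand, so the lane index can range over four (s32) or eight (s16) lanes. Illustrative use, assuming QRDMX on AArch64:

    #include <arm_neon.h>

    /* Multiply-accumulate against lane 3 of a 128-bit coefficient vector. */
    int32x4_t q31_mla_lane3(int32x4_t acc, int32x4_t x, int32x4_t coeffs) {
        return vqrdmlahq_laneq_s32(acc, x, coeffs, 3);
    }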
- #if defined(__aarch64__)
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
- return __ret;
- }
- #else
- __ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vabd_f64(float64x1_t __p0, float64x1_t __p1) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
- return __ret;
- }
- #else
- __ai float64x1_t vabd_f64(float64x1_t __p0, float64x1_t __p1) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64_t vabdd_f64(float64_t __p0, float64_t __p1) {
- float64_t __ret;
- __ret = (float64_t) __builtin_neon_vabdd_f64(__p0, __p1);
- return __ret;
- }
- #else
- __ai float64_t vabdd_f64(float64_t __p0, float64_t __p1) {
- float64_t __ret;
- __ret = (float64_t) __builtin_neon_vabdd_f64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32_t vabds_f32(float32_t __p0, float32_t __p1) {
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vabds_f32(__p0, __p1);
- return __ret;
- }
- #else
- __ai float32_t vabds_f32(float32_t __p0, float32_t __p1) {
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vabds_f32(__p0, __p1);
- return __ret;
- }
- #endif
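Editorial usage sketch, not part of the header source above (AArch64 only): vabdq_f64 and the scalar vabdd_f64/vabds_f32 forms compute |a - b| per lane or per element. The names below are illustrative only.

    #include <arm_neon.h>

    /* Per-lane absolute difference of two double-precision pairs. */
    float64x2_t absdiff2(float64x2_t a, float64x2_t b) { return vabdq_f64(a, b); }

    /* Scalar form, e.g. for the leftover element of an odd-length array. */
    float64_t absdiff1(float64_t a, float64_t b) { return vabdd_f64(a, b); }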
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vabsq_f64(float64x2_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 42);
- return __ret;
- }
- #else
- __ai float64x2_t vabsq_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 42);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vabsq_s64(int64x2_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 35);
- return __ret;
- }
- #else
- __ai int64x2_t vabsq_s64(int64x2_t __p0) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 35);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vabs_f64(float64x1_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 10);
- return __ret;
- }
- #else
- __ai float64x1_t vabs_f64(float64x1_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 10);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vabs_s64(int64x1_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 3);
- return __ret;
- }
- #else
- __ai int64x1_t vabs_s64(int64x1_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 3);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64_t vabsd_s64(int64_t __p0) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vabsd_s64(__p0);
- return __ret;
- }
- #else
- __ai int64_t vabsd_s64(int64_t __p0) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vabsd_s64(__p0);
- return __ret;
- }
- #endif
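Editorial usage sketch, not part of the header source above (AArch64 only): the 64-bit integer and double-precision absolute-value forms defined here.

    #include <arm_neon.h>

    int64x2_t   abs_i64x2(int64x2_t v)   { return vabsq_s64(v); }  /* per-lane |v| */
    float64x2_t abs_f64x2(float64x2_t v) { return vabsq_f64(v); }
    int64_t     abs_i64(int64_t v)       { return vabsd_s64(v); }  /* scalar form */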
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __ret;
- __ret = __p0 + __p1;
- return __ret;
- }
- #else
- __ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float64x2_t __ret;
- __ret = __rev0 + __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vadd_f64(float64x1_t __p0, float64x1_t __p1) {
- float64x1_t __ret;
- __ret = __p0 + __p1;
- return __ret;
- }
- #else
- __ai float64x1_t vadd_f64(float64x1_t __p0, float64x1_t __p1) {
- float64x1_t __ret;
- __ret = __p0 + __p1;
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64_t vaddd_u64(uint64_t __p0, uint64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vaddd_u64(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint64_t vaddd_u64(uint64_t __p0, uint64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vaddd_u64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64_t vaddd_s64(int64_t __p0, int64_t __p1) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vaddd_s64(__p0, __p1);
- return __ret;
- }
- #else
- __ai int64_t vaddd_s64(int64_t __p0, int64_t __p1) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vaddd_s64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
- uint16x8_t __ret;
- __ret = vcombine_u16(__p0, vaddhn_u32(__p1, __p2));
- return __ret;
- }
- #else
- __ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __noswap_vcombine_u16(__rev0, __noswap_vaddhn_u32(__rev1, __rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
- uint32x4_t __ret;
- __ret = vcombine_u32(__p0, vaddhn_u64(__p1, __p2));
- return __ret;
- }
- #else
- __ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
- uint32x4_t __ret;
- __ret = __noswap_vcombine_u32(__rev0, __noswap_vaddhn_u64(__rev1, __rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
- uint8x16_t __ret;
- __ret = vcombine_u8(__p0, vaddhn_u16(__p1, __p2));
- return __ret;
- }
- #else
- __ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = __noswap_vcombine_u8(__rev0, __noswap_vaddhn_u16(__rev1, __rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
- int16x8_t __ret;
- __ret = vcombine_s16(__p0, vaddhn_s32(__p1, __p2));
- return __ret;
- }
- #else
- __ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __noswap_vcombine_s16(__rev0, __noswap_vaddhn_s32(__rev1, __rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
- int32x4_t __ret;
- __ret = vcombine_s32(__p0, vaddhn_s64(__p1, __p2));
- return __ret;
- }
- #else
- __ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
- int32x4_t __ret;
- __ret = __noswap_vcombine_s32(__rev0, __noswap_vaddhn_s64(__rev1, __rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
- int8x16_t __ret;
- __ret = vcombine_s8(__p0, vaddhn_s16(__p1, __p2));
- return __ret;
- }
- #else
- __ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = __noswap_vcombine_s8(__rev0, __noswap_vaddhn_s16(__rev1, __rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
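Editorial usage sketch, not part of the header source above: as the definitions show, vaddhn_high_* is vcombine of an existing narrowed low half with a second narrowed high-half sum, so two calls pack the results of narrowing two register pairs into one vector. Illustrative only:

    #include <arm_neon.h>

    /* Add two pairs of u32 vectors and keep only the high 16 bits of each
     * 32-bit sum, packing all eight results into one uint16x8_t. */
    uint16x8_t add_narrow_hi(uint32x4_t a_lo, uint32x4_t b_lo,
                             uint32x4_t a_hi, uint32x4_t b_hi) {
        uint16x4_t lo = vaddhn_u32(a_lo, b_lo);      /* result lanes 0..3 */
        return vaddhn_high_u32(lo, a_hi, b_hi);      /* result lanes 4..7 */
    }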
- #ifdef __LITTLE_ENDIAN__
- __ai uint16_t vaddlvq_u8(uint8x16_t __p0) {
- uint16_t __ret;
- __ret = (uint16_t) __builtin_neon_vaddlvq_u8((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai uint16_t vaddlvq_u8(uint8x16_t __p0) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16_t __ret;
- __ret = (uint16_t) __builtin_neon_vaddlvq_u8((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64_t vaddlvq_u32(uint32x4_t __p0) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vaddlvq_u32((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai uint64_t vaddlvq_u32(uint32x4_t __p0) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vaddlvq_u32((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32_t vaddlvq_u16(uint16x8_t __p0) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vaddlvq_u16((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai uint32_t vaddlvq_u16(uint16x8_t __p0) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vaddlvq_u16((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16_t vaddlvq_s8(int8x16_t __p0) {
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vaddlvq_s8((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai int16_t vaddlvq_s8(int8x16_t __p0) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vaddlvq_s8((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64_t vaddlvq_s32(int32x4_t __p0) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vaddlvq_s32((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai int64_t vaddlvq_s32(int32x4_t __p0) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vaddlvq_s32((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32_t vaddlvq_s16(int16x8_t __p0) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vaddlvq_s16((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai int32_t vaddlvq_s16(int16x8_t __p0) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vaddlvq_s16((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16_t vaddlv_u8(uint8x8_t __p0) {
- uint16_t __ret;
- __ret = (uint16_t) __builtin_neon_vaddlv_u8((int8x8_t)__p0);
- return __ret;
- }
- #else
- __ai uint16_t vaddlv_u8(uint8x8_t __p0) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16_t __ret;
- __ret = (uint16_t) __builtin_neon_vaddlv_u8((int8x8_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64_t vaddlv_u32(uint32x2_t __p0) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vaddlv_u32((int8x8_t)__p0);
- return __ret;
- }
- #else
- __ai uint64_t vaddlv_u32(uint32x2_t __p0) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vaddlv_u32((int8x8_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32_t vaddlv_u16(uint16x4_t __p0) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vaddlv_u16((int8x8_t)__p0);
- return __ret;
- }
- #else
- __ai uint32_t vaddlv_u16(uint16x4_t __p0) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vaddlv_u16((int8x8_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16_t vaddlv_s8(int8x8_t __p0) {
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vaddlv_s8((int8x8_t)__p0);
- return __ret;
- }
- #else
- __ai int16_t vaddlv_s8(int8x8_t __p0) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vaddlv_s8((int8x8_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64_t vaddlv_s32(int32x2_t __p0) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vaddlv_s32((int8x8_t)__p0);
- return __ret;
- }
- #else
- __ai int64_t vaddlv_s32(int32x2_t __p0) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vaddlv_s32((int8x8_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32_t vaddlv_s16(int16x4_t __p0) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vaddlv_s16((int8x8_t)__p0);
- return __ret;
- }
- #else
- __ai int32_t vaddlv_s16(int16x4_t __p0) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vaddlv_s16((int8x8_t)__rev0);
- return __ret;
- }
- #endif
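Editorial usage sketch, not part of the header source above (AArch64 only): vaddlv*/vaddlvq* sum every lane into a single widened scalar, so byte sums land in 16 bits and 32-bit sums in 64 bits. Illustrative only:

    #include <arm_neon.h>

    /* Sum all sixteen bytes of a vector into one 16-bit scalar. */
    uint16_t byte_sum(uint8x16_t v) { return vaddlvq_u8(v); }

    /* Widening sum of four 32-bit lanes into a 64-bit scalar. */
    int64_t lane_sum(int32x4_t v) { return vaddlvq_s32(v); }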
- #ifdef __LITTLE_ENDIAN__
- __ai uint8_t vaddvq_u8(uint8x16_t __p0) {
- uint8_t __ret;
- __ret = (uint8_t) __builtin_neon_vaddvq_u8((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai uint8_t vaddvq_u8(uint8x16_t __p0) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8_t __ret;
- __ret = (uint8_t) __builtin_neon_vaddvq_u8((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32_t vaddvq_u32(uint32x4_t __p0) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vaddvq_u32((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai uint32_t vaddvq_u32(uint32x4_t __p0) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vaddvq_u32((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64_t vaddvq_u64(uint64x2_t __p0) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vaddvq_u64((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai uint64_t vaddvq_u64(uint64x2_t __p0) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vaddvq_u64((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16_t vaddvq_u16(uint16x8_t __p0) {
- uint16_t __ret;
- __ret = (uint16_t) __builtin_neon_vaddvq_u16((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai uint16_t vaddvq_u16(uint16x8_t __p0) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16_t __ret;
- __ret = (uint16_t) __builtin_neon_vaddvq_u16((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8_t vaddvq_s8(int8x16_t __p0) {
- int8_t __ret;
- __ret = (int8_t) __builtin_neon_vaddvq_s8((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai int8_t vaddvq_s8(int8x16_t __p0) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8_t __ret;
- __ret = (int8_t) __builtin_neon_vaddvq_s8((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64_t vaddvq_f64(float64x2_t __p0) {
- float64_t __ret;
- __ret = (float64_t) __builtin_neon_vaddvq_f64((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai float64_t vaddvq_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64_t __ret;
- __ret = (float64_t) __builtin_neon_vaddvq_f64((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32_t vaddvq_f32(float32x4_t __p0) {
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vaddvq_f32((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai float32_t vaddvq_f32(float32x4_t __p0) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vaddvq_f32((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32_t vaddvq_s32(int32x4_t __p0) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vaddvq_s32((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai int32_t vaddvq_s32(int32x4_t __p0) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vaddvq_s32((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64_t vaddvq_s64(int64x2_t __p0) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vaddvq_s64((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai int64_t vaddvq_s64(int64x2_t __p0) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vaddvq_s64((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16_t vaddvq_s16(int16x8_t __p0) {
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vaddvq_s16((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai int16_t vaddvq_s16(int16x8_t __p0) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vaddvq_s16((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8_t vaddv_u8(uint8x8_t __p0) {
- uint8_t __ret;
- __ret = (uint8_t) __builtin_neon_vaddv_u8((int8x8_t)__p0);
- return __ret;
- }
- #else
- __ai uint8_t vaddv_u8(uint8x8_t __p0) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8_t __ret;
- __ret = (uint8_t) __builtin_neon_vaddv_u8((int8x8_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32_t vaddv_u32(uint32x2_t __p0) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vaddv_u32((int8x8_t)__p0);
- return __ret;
- }
- #else
- __ai uint32_t vaddv_u32(uint32x2_t __p0) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vaddv_u32((int8x8_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16_t vaddv_u16(uint16x4_t __p0) {
- uint16_t __ret;
- __ret = (uint16_t) __builtin_neon_vaddv_u16((int8x8_t)__p0);
- return __ret;
- }
- #else
- __ai uint16_t vaddv_u16(uint16x4_t __p0) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16_t __ret;
- __ret = (uint16_t) __builtin_neon_vaddv_u16((int8x8_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8_t vaddv_s8(int8x8_t __p0) {
- int8_t __ret;
- __ret = (int8_t) __builtin_neon_vaddv_s8((int8x8_t)__p0);
- return __ret;
- }
- #else
- __ai int8_t vaddv_s8(int8x8_t __p0) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8_t __ret;
- __ret = (int8_t) __builtin_neon_vaddv_s8((int8x8_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32_t vaddv_f32(float32x2_t __p0) {
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vaddv_f32((int8x8_t)__p0);
- return __ret;
- }
- #else
- __ai float32_t vaddv_f32(float32x2_t __p0) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vaddv_f32((int8x8_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32_t vaddv_s32(int32x2_t __p0) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vaddv_s32((int8x8_t)__p0);
- return __ret;
- }
- #else
- __ai int32_t vaddv_s32(int32x2_t __p0) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vaddv_s32((int8x8_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16_t vaddv_s16(int16x4_t __p0) {
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vaddv_s16((int8x8_t)__p0);
- return __ret;
- }
- #else
- __ai int16_t vaddv_s16(int16x4_t __p0) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vaddv_s16((int8x8_t)__rev0);
- return __ret;
- }
- #endif
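Editorial usage sketch, not part of the header source above (AArch64 only): unlike vaddlv*, the vaddv* reductions keep the element width, e.g. the horizontal sum at the tail of a dot-product loop. Illustrative only:

    #include <arm_neon.h>

    /* Horizontal sum of a float32x4_t accumulator. */
    float dot_tail(float32x4_t acc) { return vaddvq_f32(acc); }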
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x1_t vbsl_p64(uint64x1_t __p0, poly64x1_t __p1, poly64x1_t __p2) {
- poly64x1_t __ret;
- __ret = (poly64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 6);
- return __ret;
- }
- #else
- __ai poly64x1_t vbsl_p64(uint64x1_t __p0, poly64x1_t __p1, poly64x1_t __p2) {
- poly64x1_t __ret;
- __ret = (poly64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 6);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) {
- poly64x2_t __ret;
- __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 38);
- return __ret;
- }
- #else
- __ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- poly64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
- poly64x2_t __ret;
- __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 38);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
- return __ret;
- }
- #else
- __ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vbsl_f64(uint64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
- return __ret;
- }
- #else
- __ai float64x1_t vbsl_f64(uint64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
- return __ret;
- }
- #endif
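Editorial usage sketch, not part of the header source above: vbslq_f64 selects, bit by bit, from its second operand where the mask bit is 1 and from the third where it is 0; combined with a comparison it gives a branch-free per-lane select. A hypothetical clamp:

    #include <arm_neon.h>

    /* Per-lane: x > limit ? limit : x, without branches. */
    float64x2_t clamp_hi(float64x2_t x, float64x2_t limit) {
        uint64x2_t too_big = vcgtq_f64(x, limit);  /* all-ones lanes where x > limit */
        return vbslq_f64(too_big, limit, x);
    }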
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
- return __ret;
- }
- #else
- __ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vcage_f64(float64x1_t __p0, float64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
- return __ret;
- }
- #else
- __ai uint64x1_t vcage_f64(float64x1_t __p0, float64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64_t vcaged_f64(float64_t __p0, float64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcaged_f64(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint64_t vcaged_f64(float64_t __p0, float64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcaged_f64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32_t vcages_f32(float32_t __p0, float32_t __p1) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vcages_f32(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint32_t vcages_f32(float32_t __p0, float32_t __p1) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vcages_f32(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
- return __ret;
- }
- #else
- __ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vcagt_f64(float64x1_t __p0, float64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
- return __ret;
- }
- #else
- __ai uint64x1_t vcagt_f64(float64x1_t __p0, float64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64_t vcagtd_f64(float64_t __p0, float64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcagtd_f64(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint64_t vcagtd_f64(float64_t __p0, float64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcagtd_f64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32_t vcagts_f32(float32_t __p0, float32_t __p1) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vcagts_f32(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint32_t vcagts_f32(float32_t __p0, float32_t __p1) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vcagts_f32(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
- return __ret;
- }
- #else
- __ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vcale_f64(float64x1_t __p0, float64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
- return __ret;
- }
- #else
- __ai uint64x1_t vcale_f64(float64x1_t __p0, float64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64_t vcaled_f64(float64_t __p0, float64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcaled_f64(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint64_t vcaled_f64(float64_t __p0, float64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcaled_f64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32_t vcales_f32(float32_t __p0, float32_t __p1) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vcales_f32(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint32_t vcales_f32(float32_t __p0, float32_t __p1) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vcales_f32(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
- return __ret;
- }
- #else
- __ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vcalt_f64(float64x1_t __p0, float64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
- return __ret;
- }
- #else
- __ai uint64x1_t vcalt_f64(float64x1_t __p0, float64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64_t vcaltd_f64(float64_t __p0, float64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcaltd_f64(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint64_t vcaltd_f64(float64_t __p0, float64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcaltd_f64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32_t vcalts_f32(float32_t __p0, float32_t __p1) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vcalts_f32(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint32_t vcalts_f32(float32_t __p0, float32_t __p1) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vcalts_f32(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vceq_p64(poly64x1_t __p0, poly64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0 == __p1);
- return __ret;
- }
- #else
- __ai uint64x1_t vceq_p64(poly64x1_t __p0, poly64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0 == __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0 == __p1);
- return __ret;
- }
- #else
- __ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) {
- poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__rev0 == __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0 == __p1);
- return __ret;
- }
- #else
- __ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__rev0 == __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0 == __p1);
- return __ret;
- }
- #else
- __ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__rev0 == __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0 == __p1);
- return __ret;
- }
- #else
- __ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__rev0 == __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vceq_u64(uint64x1_t __p0, uint64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0 == __p1);
- return __ret;
- }
- #else
- __ai uint64x1_t vceq_u64(uint64x1_t __p0, uint64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0 == __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vceq_f64(float64x1_t __p0, float64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0 == __p1);
- return __ret;
- }
- #else
- __ai uint64x1_t vceq_f64(float64x1_t __p0, float64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0 == __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vceq_s64(int64x1_t __p0, int64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0 == __p1);
- return __ret;
- }
- #else
- __ai uint64x1_t vceq_s64(int64x1_t __p0, int64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0 == __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64_t vceqd_u64(uint64_t __p0, uint64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vceqd_u64(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint64_t vceqd_u64(uint64_t __p0, uint64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vceqd_u64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64_t vceqd_s64(int64_t __p0, int64_t __p1) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vceqd_s64(__p0, __p1);
- return __ret;
- }
- #else
- __ai int64_t vceqd_s64(int64_t __p0, int64_t __p1) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vceqd_s64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64_t vceqd_f64(float64_t __p0, float64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vceqd_f64(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint64_t vceqd_f64(float64_t __p0, float64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vceqd_f64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32_t vceqs_f32(float32_t __p0, float32_t __p1) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vceqs_f32(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint32_t vceqs_f32(float32_t __p0, float32_t __p1) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vceqs_f32(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vceqz_p8(poly8x8_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vceqz_p8(poly8x8_t __p0) {
- poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vceqz_p64(poly64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
- return __ret;
- }
- #else
- __ai uint64x1_t vceqz_p64(poly64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vceqz_p16(poly16x4_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vceqz_p16(poly16x4_t __p0) {
- poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vceqzq_p8(poly8x16_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vceqzq_p8(poly8x16_t __p0) {
- poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vceqzq_p64(poly64x2_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
- return __ret;
- }
- #else
- __ai uint64x2_t vceqzq_p64(poly64x2_t __p0) {
- poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vceqzq_p16(poly16x8_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vceqzq_p16(poly16x8_t __p0) {
- poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vceqzq_u8(uint8x16_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vceqzq_u8(uint8x16_t __p0) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vceqzq_u32(uint32x4_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vceqzq_u32(uint32x4_t __p0) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vceqzq_u64(uint64x2_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
- return __ret;
- }
- #else
- __ai uint64x2_t vceqzq_u64(uint64x2_t __p0) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vceqzq_u16(uint16x8_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vceqzq_u16(uint16x8_t __p0) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vceqzq_s8(int8x16_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vceqzq_s8(int8x16_t __p0) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vceqzq_f64(float64x2_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
- return __ret;
- }
- #else
- __ai uint64x2_t vceqzq_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vceqzq_f32(float32x4_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vceqzq_f32(float32x4_t __p0) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vceqzq_s32(int32x4_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vceqzq_s32(int32x4_t __p0) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vceqzq_s64(int64x2_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
- return __ret;
- }
- #else
- __ai uint64x2_t vceqzq_s64(int64x2_t __p0) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vceqzq_s16(int16x8_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vceqzq_s16(int16x8_t __p0) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vceqz_u8(uint8x8_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vceqz_u8(uint8x8_t __p0) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vceqz_u32(uint32x2_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vceqz_u32(uint32x2_t __p0) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vceqz_u64(uint64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
- return __ret;
- }
- #else
- __ai uint64x1_t vceqz_u64(uint64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vceqz_u16(uint16x4_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vceqz_u16(uint16x4_t __p0) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vceqz_s8(int8x8_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vceqz_s8(int8x8_t __p0) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vceqz_f64(float64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
- return __ret;
- }
- #else
- __ai uint64x1_t vceqz_f64(float64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vceqz_f32(float32x2_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vceqz_f32(float32x2_t __p0) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vceqz_s32(int32x2_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vceqz_s32(int32x2_t __p0) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vceqz_s64(int64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
- return __ret;
- }
- #else
- __ai uint64x1_t vceqz_s64(int64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vceqz_s16(int16x4_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vceqz_s16(int16x4_t __p0) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64_t vceqzd_u64(uint64_t __p0) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vceqzd_u64(__p0);
- return __ret;
- }
- #else
- __ai uint64_t vceqzd_u64(uint64_t __p0) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vceqzd_u64(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64_t vceqzd_s64(int64_t __p0) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vceqzd_s64(__p0);
- return __ret;
- }
- #else
- __ai int64_t vceqzd_s64(int64_t __p0) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vceqzd_s64(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64_t vceqzd_f64(float64_t __p0) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vceqzd_f64(__p0);
- return __ret;
- }
- #else
- __ai uint64_t vceqzd_f64(float64_t __p0) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vceqzd_f64(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32_t vceqzs_f32(float32_t __p0) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vceqzs_f32(__p0);
- return __ret;
- }
- #else
- __ai uint32_t vceqzs_f32(float32_t __p0) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vceqzs_f32(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0 >= __p1);
- return __ret;
- }
- #else
- __ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__rev0 >= __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0 >= __p1);
- return __ret;
- }
- #else
- __ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__rev0 >= __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0 >= __p1);
- return __ret;
- }
- #else
- __ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__rev0 >= __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vcge_u64(uint64x1_t __p0, uint64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0 >= __p1);
- return __ret;
- }
- #else
- __ai uint64x1_t vcge_u64(uint64x1_t __p0, uint64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0 >= __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vcge_f64(float64x1_t __p0, float64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0 >= __p1);
- return __ret;
- }
- #else
- __ai uint64x1_t vcge_f64(float64x1_t __p0, float64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0 >= __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vcge_s64(int64x1_t __p0, int64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0 >= __p1);
- return __ret;
- }
- #else
- __ai uint64x1_t vcge_s64(int64x1_t __p0, int64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0 >= __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64_t vcged_s64(int64_t __p0, int64_t __p1) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vcged_s64(__p0, __p1);
- return __ret;
- }
- #else
- __ai int64_t vcged_s64(int64_t __p0, int64_t __p1) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vcged_s64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64_t vcged_u64(uint64_t __p0, uint64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcged_u64(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint64_t vcged_u64(uint64_t __p0, uint64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcged_u64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64_t vcged_f64(float64_t __p0, float64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcged_f64(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint64_t vcged_f64(float64_t __p0, float64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcged_f64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32_t vcges_f32(float32_t __p0, float32_t __p1) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vcges_f32(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint32_t vcges_f32(float32_t __p0, float32_t __p1) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vcges_f32(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vcgezq_s8(int8x16_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vcgezq_s8(int8x16_t __p0) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vcgezq_f64(float64x2_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51);
- return __ret;
- }
- #else
- __ai uint64x2_t vcgezq_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vcgezq_f32(float32x4_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vcgezq_f32(float32x4_t __p0) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vcgezq_s32(int32x4_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vcgezq_s32(int32x4_t __p0) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vcgezq_s64(int64x2_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51);
- return __ret;
- }
- #else
- __ai uint64x2_t vcgezq_s64(int64x2_t __p0) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vcgezq_s16(int16x8_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vcgezq_s16(int16x8_t __p0) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vcgez_s8(int8x8_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vcgez_s8(int8x8_t __p0) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vcgez_f64(float64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19);
- return __ret;
- }
- #else
- __ai uint64x1_t vcgez_f64(float64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vcgez_f32(float32x2_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vcgez_f32(float32x2_t __p0) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vcgez_s32(int32x2_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vcgez_s32(int32x2_t __p0) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vcgez_s64(int64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19);
- return __ret;
- }
- #else
- __ai uint64x1_t vcgez_s64(int64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vcgez_s16(int16x4_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vcgez_s16(int16x4_t __p0) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64_t vcgezd_s64(int64_t __p0) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vcgezd_s64(__p0);
- return __ret;
- }
- #else
- __ai int64_t vcgezd_s64(int64_t __p0) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vcgezd_s64(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64_t vcgezd_f64(float64_t __p0) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcgezd_f64(__p0);
- return __ret;
- }
- #else
- __ai uint64_t vcgezd_f64(float64_t __p0) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcgezd_f64(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32_t vcgezs_f32(float32_t __p0) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vcgezs_f32(__p0);
- return __ret;
- }
- #else
- __ai uint32_t vcgezs_f32(float32_t __p0) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vcgezs_f32(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0 > __p1);
- return __ret;
- }
- #else
- __ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__rev0 > __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0 > __p1);
- return __ret;
- }
- #else
- __ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__rev0 > __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0 > __p1);
- return __ret;
- }
- #else
- __ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__rev0 > __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vcgt_u64(uint64x1_t __p0, uint64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0 > __p1);
- return __ret;
- }
- #else
- __ai uint64x1_t vcgt_u64(uint64x1_t __p0, uint64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0 > __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vcgt_f64(float64x1_t __p0, float64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0 > __p1);
- return __ret;
- }
- #else
- __ai uint64x1_t vcgt_f64(float64x1_t __p0, float64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0 > __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vcgt_s64(int64x1_t __p0, int64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0 > __p1);
- return __ret;
- }
- #else
- __ai uint64x1_t vcgt_s64(int64x1_t __p0, int64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0 > __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64_t vcgtd_s64(int64_t __p0, int64_t __p1) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vcgtd_s64(__p0, __p1);
- return __ret;
- }
- #else
- __ai int64_t vcgtd_s64(int64_t __p0, int64_t __p1) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vcgtd_s64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64_t vcgtd_u64(uint64_t __p0, uint64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcgtd_u64(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint64_t vcgtd_u64(uint64_t __p0, uint64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcgtd_u64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64_t vcgtd_f64(float64_t __p0, float64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcgtd_f64(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint64_t vcgtd_f64(float64_t __p0, float64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcgtd_f64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32_t vcgts_f32(float32_t __p0, float32_t __p1) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vcgts_f32(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint32_t vcgts_f32(float32_t __p0, float32_t __p1) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vcgts_f32(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vcgtzq_s8(int8x16_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vcgtzq_s8(int8x16_t __p0) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vcgtzq_f64(float64x2_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51);
- return __ret;
- }
- #else
- __ai uint64x2_t vcgtzq_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vcgtzq_f32(float32x4_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vcgtzq_f32(float32x4_t __p0) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vcgtzq_s32(int32x4_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vcgtzq_s32(int32x4_t __p0) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vcgtzq_s64(int64x2_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51);
- return __ret;
- }
- #else
- __ai uint64x2_t vcgtzq_s64(int64x2_t __p0) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vcgtzq_s16(int16x8_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vcgtzq_s16(int16x8_t __p0) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vcgtz_s8(int8x8_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vcgtz_s8(int8x8_t __p0) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vcgtz_f64(float64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
- return __ret;
- }
- #else
- __ai uint64x1_t vcgtz_f64(float64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vcgtz_f32(float32x2_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vcgtz_f32(float32x2_t __p0) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vcgtz_s32(int32x2_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vcgtz_s32(int32x2_t __p0) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vcgtz_s64(int64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
- return __ret;
- }
- #else
- __ai uint64x1_t vcgtz_s64(int64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vcgtz_s16(int16x4_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vcgtz_s16(int16x4_t __p0) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64_t vcgtzd_s64(int64_t __p0) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vcgtzd_s64(__p0);
- return __ret;
- }
- #else
- __ai int64_t vcgtzd_s64(int64_t __p0) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vcgtzd_s64(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64_t vcgtzd_f64(float64_t __p0) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcgtzd_f64(__p0);
- return __ret;
- }
- #else
- __ai uint64_t vcgtzd_f64(float64_t __p0) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcgtzd_f64(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32_t vcgtzs_f32(float32_t __p0) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vcgtzs_f32(__p0);
- return __ret;
- }
- #else
- __ai uint32_t vcgtzs_f32(float32_t __p0) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vcgtzs_f32(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0 <= __p1);
- return __ret;
- }
- #else
- __ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__rev0 <= __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0 <= __p1);
- return __ret;
- }
- #else
- __ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__rev0 <= __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0 <= __p1);
- return __ret;
- }
- #else
- __ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__rev0 <= __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vcle_u64(uint64x1_t __p0, uint64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0 <= __p1);
- return __ret;
- }
- #else
- __ai uint64x1_t vcle_u64(uint64x1_t __p0, uint64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0 <= __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vcle_f64(float64x1_t __p0, float64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0 <= __p1);
- return __ret;
- }
- #else
- __ai uint64x1_t vcle_f64(float64x1_t __p0, float64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0 <= __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vcle_s64(int64x1_t __p0, int64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0 <= __p1);
- return __ret;
- }
- #else
- __ai uint64x1_t vcle_s64(int64x1_t __p0, int64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0 <= __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64_t vcled_u64(uint64_t __p0, uint64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcled_u64(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint64_t vcled_u64(uint64_t __p0, uint64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcled_u64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64_t vcled_s64(int64_t __p0, int64_t __p1) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vcled_s64(__p0, __p1);
- return __ret;
- }
- #else
- __ai int64_t vcled_s64(int64_t __p0, int64_t __p1) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vcled_s64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64_t vcled_f64(float64_t __p0, float64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcled_f64(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint64_t vcled_f64(float64_t __p0, float64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcled_f64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32_t vcles_f32(float32_t __p0, float32_t __p1) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vcles_f32(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint32_t vcles_f32(float32_t __p0, float32_t __p1) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vcles_f32(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vclezq_s8(int8x16_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vclezq_s8(int8x16_t __p0) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vclezq_f64(float64x2_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51);
- return __ret;
- }
- #else
- __ai uint64x2_t vclezq_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vclezq_f32(float32x4_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vclezq_f32(float32x4_t __p0) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vclezq_s32(int32x4_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vclezq_s32(int32x4_t __p0) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vclezq_s64(int64x2_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51);
- return __ret;
- }
- #else
- __ai uint64x2_t vclezq_s64(int64x2_t __p0) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vclezq_s16(int16x8_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vclezq_s16(int16x8_t __p0) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vclez_s8(int8x8_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__p0, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vclez_s8(int8x8_t __p0) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vclez_f64(float64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
- return __ret;
- }
- #else
- __ai uint64x1_t vclez_f64(float64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vclez_f32(float32x2_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vclez_f32(float32x2_t __p0) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vclez_s32(int32x2_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vclez_s32(int32x2_t __p0) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vclez_s64(int64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
- return __ret;
- }
- #else
- __ai uint64x1_t vclez_s64(int64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vclez_s16(int16x4_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__p0, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vclez_s16(int16x4_t __p0) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64_t vclezd_s64(int64_t __p0) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vclezd_s64(__p0);
- return __ret;
- }
- #else
- __ai int64_t vclezd_s64(int64_t __p0) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vclezd_s64(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64_t vclezd_f64(float64_t __p0) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vclezd_f64(__p0);
- return __ret;
- }
- #else
- __ai uint64_t vclezd_f64(float64_t __p0) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vclezd_f64(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32_t vclezs_f32(float32_t __p0) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vclezs_f32(__p0);
- return __ret;
- }
- #else
- __ai uint32_t vclezs_f32(float32_t __p0) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vclezs_f32(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0 < __p1);
- return __ret;
- }
- #else
- __ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__rev0 < __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0 < __p1);
- return __ret;
- }
- #else
- __ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__rev0 < __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__p0 < __p1);
- return __ret;
- }
- #else
- __ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__rev0 < __rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vclt_u64(uint64x1_t __p0, uint64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0 < __p1);
- return __ret;
- }
- #else
- __ai uint64x1_t vclt_u64(uint64x1_t __p0, uint64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0 < __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vclt_f64(float64x1_t __p0, float64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0 < __p1);
- return __ret;
- }
- #else
- __ai uint64x1_t vclt_f64(float64x1_t __p0, float64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0 < __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vclt_s64(int64x1_t __p0, int64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0 < __p1);
- return __ret;
- }
- #else
- __ai uint64x1_t vclt_s64(int64x1_t __p0, int64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t)(__p0 < __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64_t vcltd_u64(uint64_t __p0, uint64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcltd_u64(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint64_t vcltd_u64(uint64_t __p0, uint64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcltd_u64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64_t vcltd_s64(int64_t __p0, int64_t __p1) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vcltd_s64(__p0, __p1);
- return __ret;
- }
- #else
- __ai int64_t vcltd_s64(int64_t __p0, int64_t __p1) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vcltd_s64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64_t vcltd_f64(float64_t __p0, float64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcltd_f64(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint64_t vcltd_f64(float64_t __p0, float64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcltd_f64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32_t vclts_f32(float32_t __p0, float32_t __p1) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vclts_f32(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint32_t vclts_f32(float32_t __p0, float32_t __p1) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vclts_f32(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vcltzq_s8(int8x16_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vcltzq_s8(int8x16_t __p0) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vcltzq_f64(float64x2_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51);
- return __ret;
- }
- #else
- __ai uint64x2_t vcltzq_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vcltzq_f32(float32x4_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vcltzq_f32(float32x4_t __p0) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vcltzq_s32(int32x4_t __p0) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vcltzq_s32(int32x4_t __p0) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vcltzq_s64(int64x2_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51);
- return __ret;
- }
- #else
- __ai uint64x2_t vcltzq_s64(int64x2_t __p0) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vcltzq_s16(int16x8_t __p0) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vcltzq_s16(int16x8_t __p0) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vcltz_s8(int8x8_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vcltz_s8(int8x8_t __p0) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vcltz_f64(float64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
- return __ret;
- }
- #else
- __ai uint64x1_t vcltz_f64(float64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vcltz_f32(float32x2_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vcltz_f32(float32x2_t __p0) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vcltz_s32(int32x2_t __p0) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vcltz_s32(int32x2_t __p0) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vcltz_s64(int64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
- return __ret;
- }
- #else
- __ai uint64x1_t vcltz_s64(int64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vcltz_s16(int16x4_t __p0) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vcltz_s16(int16x4_t __p0) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64_t vcltzd_s64(int64_t __p0) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vcltzd_s64(__p0);
- return __ret;
- }
- #else
- __ai int64_t vcltzd_s64(int64_t __p0) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vcltzd_s64(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64_t vcltzd_f64(float64_t __p0) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcltzd_f64(__p0);
- return __ret;
- }
- #else
- __ai uint64_t vcltzd_f64(float64_t __p0) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcltzd_f64(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32_t vcltzs_f32(float32_t __p0) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vcltzs_f32(__p0);
- return __ret;
- }
- #else
- __ai uint32_t vcltzs_f32(float32_t __p0) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vcltzs_f32(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) {
- poly64x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
- return __ret;
- }
- #else
- __ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) {
- poly64x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) {
- float64x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
- return __ret;
- }
- #else
- __ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) {
- float64x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopyq_lane_p8(__p0_12, __p1_12, __p2_12, __p3_12) __extension__ ({ \
- poly8x16_t __s0_12 = __p0_12; \
- poly8x8_t __s2_12 = __p2_12; \
- poly8x16_t __ret_12; \
- __ret_12 = vsetq_lane_p8(vget_lane_p8(__s2_12, __p3_12), __s0_12, __p1_12); \
- __ret_12; \
- })
- #else
- #define vcopyq_lane_p8(__p0_13, __p1_13, __p2_13, __p3_13) __extension__ ({ \
- poly8x16_t __s0_13 = __p0_13; \
- poly8x8_t __s2_13 = __p2_13; \
- poly8x16_t __rev0_13; __rev0_13 = __builtin_shufflevector(__s0_13, __s0_13, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly8x8_t __rev2_13; __rev2_13 = __builtin_shufflevector(__s2_13, __s2_13, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly8x16_t __ret_13; \
- __ret_13 = __noswap_vsetq_lane_p8(__noswap_vget_lane_p8(__rev2_13, __p3_13), __rev0_13, __p1_13); \
- __ret_13 = __builtin_shufflevector(__ret_13, __ret_13, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_13; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopyq_lane_p16(__p0_14, __p1_14, __p2_14, __p3_14) __extension__ ({ \
- poly16x8_t __s0_14 = __p0_14; \
- poly16x4_t __s2_14 = __p2_14; \
- poly16x8_t __ret_14; \
- __ret_14 = vsetq_lane_p16(vget_lane_p16(__s2_14, __p3_14), __s0_14, __p1_14); \
- __ret_14; \
- })
- #else
- #define vcopyq_lane_p16(__p0_15, __p1_15, __p2_15, __p3_15) __extension__ ({ \
- poly16x8_t __s0_15 = __p0_15; \
- poly16x4_t __s2_15 = __p2_15; \
- poly16x8_t __rev0_15; __rev0_15 = __builtin_shufflevector(__s0_15, __s0_15, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly16x4_t __rev2_15; __rev2_15 = __builtin_shufflevector(__s2_15, __s2_15, 3, 2, 1, 0); \
- poly16x8_t __ret_15; \
- __ret_15 = __noswap_vsetq_lane_p16(__noswap_vget_lane_p16(__rev2_15, __p3_15), __rev0_15, __p1_15); \
- __ret_15 = __builtin_shufflevector(__ret_15, __ret_15, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_15; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopyq_lane_u8(__p0_16, __p1_16, __p2_16, __p3_16) __extension__ ({ \
- uint8x16_t __s0_16 = __p0_16; \
- uint8x8_t __s2_16 = __p2_16; \
- uint8x16_t __ret_16; \
- __ret_16 = vsetq_lane_u8(vget_lane_u8(__s2_16, __p3_16), __s0_16, __p1_16); \
- __ret_16; \
- })
- #else
- #define vcopyq_lane_u8(__p0_17, __p1_17, __p2_17, __p3_17) __extension__ ({ \
- uint8x16_t __s0_17 = __p0_17; \
- uint8x8_t __s2_17 = __p2_17; \
- uint8x16_t __rev0_17; __rev0_17 = __builtin_shufflevector(__s0_17, __s0_17, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x8_t __rev2_17; __rev2_17 = __builtin_shufflevector(__s2_17, __s2_17, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x16_t __ret_17; \
- __ret_17 = __noswap_vsetq_lane_u8(__noswap_vget_lane_u8(__rev2_17, __p3_17), __rev0_17, __p1_17); \
- __ret_17 = __builtin_shufflevector(__ret_17, __ret_17, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_17; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopyq_lane_u32(__p0_18, __p1_18, __p2_18, __p3_18) __extension__ ({ \
- uint32x4_t __s0_18 = __p0_18; \
- uint32x2_t __s2_18 = __p2_18; \
- uint32x4_t __ret_18; \
- __ret_18 = vsetq_lane_u32(vget_lane_u32(__s2_18, __p3_18), __s0_18, __p1_18); \
- __ret_18; \
- })
- #else
- #define vcopyq_lane_u32(__p0_19, __p1_19, __p2_19, __p3_19) __extension__ ({ \
- uint32x4_t __s0_19 = __p0_19; \
- uint32x2_t __s2_19 = __p2_19; \
- uint32x4_t __rev0_19; __rev0_19 = __builtin_shufflevector(__s0_19, __s0_19, 3, 2, 1, 0); \
- uint32x2_t __rev2_19; __rev2_19 = __builtin_shufflevector(__s2_19, __s2_19, 1, 0); \
- uint32x4_t __ret_19; \
- __ret_19 = __noswap_vsetq_lane_u32(__noswap_vget_lane_u32(__rev2_19, __p3_19), __rev0_19, __p1_19); \
- __ret_19 = __builtin_shufflevector(__ret_19, __ret_19, 3, 2, 1, 0); \
- __ret_19; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopyq_lane_u64(__p0_20, __p1_20, __p2_20, __p3_20) __extension__ ({ \
- uint64x2_t __s0_20 = __p0_20; \
- uint64x1_t __s2_20 = __p2_20; \
- uint64x2_t __ret_20; \
- __ret_20 = vsetq_lane_u64(vget_lane_u64(__s2_20, __p3_20), __s0_20, __p1_20); \
- __ret_20; \
- })
- #else
- #define vcopyq_lane_u64(__p0_21, __p1_21, __p2_21, __p3_21) __extension__ ({ \
- uint64x2_t __s0_21 = __p0_21; \
- uint64x1_t __s2_21 = __p2_21; \
- uint64x2_t __rev0_21; __rev0_21 = __builtin_shufflevector(__s0_21, __s0_21, 1, 0); \
- uint64x2_t __ret_21; \
- __ret_21 = __noswap_vsetq_lane_u64(__noswap_vget_lane_u64(__s2_21, __p3_21), __rev0_21, __p1_21); \
- __ret_21 = __builtin_shufflevector(__ret_21, __ret_21, 1, 0); \
- __ret_21; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopyq_lane_u16(__p0_22, __p1_22, __p2_22, __p3_22) __extension__ ({ \
- uint16x8_t __s0_22 = __p0_22; \
- uint16x4_t __s2_22 = __p2_22; \
- uint16x8_t __ret_22; \
- __ret_22 = vsetq_lane_u16(vget_lane_u16(__s2_22, __p3_22), __s0_22, __p1_22); \
- __ret_22; \
- })
- #else
- #define vcopyq_lane_u16(__p0_23, __p1_23, __p2_23, __p3_23) __extension__ ({ \
- uint16x8_t __s0_23 = __p0_23; \
- uint16x4_t __s2_23 = __p2_23; \
- uint16x8_t __rev0_23; __rev0_23 = __builtin_shufflevector(__s0_23, __s0_23, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x4_t __rev2_23; __rev2_23 = __builtin_shufflevector(__s2_23, __s2_23, 3, 2, 1, 0); \
- uint16x8_t __ret_23; \
- __ret_23 = __noswap_vsetq_lane_u16(__noswap_vget_lane_u16(__rev2_23, __p3_23), __rev0_23, __p1_23); \
- __ret_23 = __builtin_shufflevector(__ret_23, __ret_23, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_23; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopyq_lane_s8(__p0_24, __p1_24, __p2_24, __p3_24) __extension__ ({ \
- int8x16_t __s0_24 = __p0_24; \
- int8x8_t __s2_24 = __p2_24; \
- int8x16_t __ret_24; \
- __ret_24 = vsetq_lane_s8(vget_lane_s8(__s2_24, __p3_24), __s0_24, __p1_24); \
- __ret_24; \
- })
- #else
- #define vcopyq_lane_s8(__p0_25, __p1_25, __p2_25, __p3_25) __extension__ ({ \
- int8x16_t __s0_25 = __p0_25; \
- int8x8_t __s2_25 = __p2_25; \
- int8x16_t __rev0_25; __rev0_25 = __builtin_shufflevector(__s0_25, __s0_25, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x8_t __rev2_25; __rev2_25 = __builtin_shufflevector(__s2_25, __s2_25, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x16_t __ret_25; \
- __ret_25 = __noswap_vsetq_lane_s8(__noswap_vget_lane_s8(__rev2_25, __p3_25), __rev0_25, __p1_25); \
- __ret_25 = __builtin_shufflevector(__ret_25, __ret_25, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_25; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopyq_lane_f32(__p0_26, __p1_26, __p2_26, __p3_26) __extension__ ({ \
- float32x4_t __s0_26 = __p0_26; \
- float32x2_t __s2_26 = __p2_26; \
- float32x4_t __ret_26; \
- __ret_26 = vsetq_lane_f32(vget_lane_f32(__s2_26, __p3_26), __s0_26, __p1_26); \
- __ret_26; \
- })
- #else
- #define vcopyq_lane_f32(__p0_27, __p1_27, __p2_27, __p3_27) __extension__ ({ \
- float32x4_t __s0_27 = __p0_27; \
- float32x2_t __s2_27 = __p2_27; \
- float32x4_t __rev0_27; __rev0_27 = __builtin_shufflevector(__s0_27, __s0_27, 3, 2, 1, 0); \
- float32x2_t __rev2_27; __rev2_27 = __builtin_shufflevector(__s2_27, __s2_27, 1, 0); \
- float32x4_t __ret_27; \
- __ret_27 = __noswap_vsetq_lane_f32(__noswap_vget_lane_f32(__rev2_27, __p3_27), __rev0_27, __p1_27); \
- __ret_27 = __builtin_shufflevector(__ret_27, __ret_27, 3, 2, 1, 0); \
- __ret_27; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopyq_lane_s32(__p0_28, __p1_28, __p2_28, __p3_28) __extension__ ({ \
- int32x4_t __s0_28 = __p0_28; \
- int32x2_t __s2_28 = __p2_28; \
- int32x4_t __ret_28; \
- __ret_28 = vsetq_lane_s32(vget_lane_s32(__s2_28, __p3_28), __s0_28, __p1_28); \
- __ret_28; \
- })
- #else
- #define vcopyq_lane_s32(__p0_29, __p1_29, __p2_29, __p3_29) __extension__ ({ \
- int32x4_t __s0_29 = __p0_29; \
- int32x2_t __s2_29 = __p2_29; \
- int32x4_t __rev0_29; __rev0_29 = __builtin_shufflevector(__s0_29, __s0_29, 3, 2, 1, 0); \
- int32x2_t __rev2_29; __rev2_29 = __builtin_shufflevector(__s2_29, __s2_29, 1, 0); \
- int32x4_t __ret_29; \
- __ret_29 = __noswap_vsetq_lane_s32(__noswap_vget_lane_s32(__rev2_29, __p3_29), __rev0_29, __p1_29); \
- __ret_29 = __builtin_shufflevector(__ret_29, __ret_29, 3, 2, 1, 0); \
- __ret_29; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopyq_lane_s64(__p0_30, __p1_30, __p2_30, __p3_30) __extension__ ({ \
- int64x2_t __s0_30 = __p0_30; \
- int64x1_t __s2_30 = __p2_30; \
- int64x2_t __ret_30; \
- __ret_30 = vsetq_lane_s64(vget_lane_s64(__s2_30, __p3_30), __s0_30, __p1_30); \
- __ret_30; \
- })
- #else
- #define vcopyq_lane_s64(__p0_31, __p1_31, __p2_31, __p3_31) __extension__ ({ \
- int64x2_t __s0_31 = __p0_31; \
- int64x1_t __s2_31 = __p2_31; \
- int64x2_t __rev0_31; __rev0_31 = __builtin_shufflevector(__s0_31, __s0_31, 1, 0); \
- int64x2_t __ret_31; \
- __ret_31 = __noswap_vsetq_lane_s64(__noswap_vget_lane_s64(__s2_31, __p3_31), __rev0_31, __p1_31); \
- __ret_31 = __builtin_shufflevector(__ret_31, __ret_31, 1, 0); \
- __ret_31; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopyq_lane_s16(__p0_32, __p1_32, __p2_32, __p3_32) __extension__ ({ \
- int16x8_t __s0_32 = __p0_32; \
- int16x4_t __s2_32 = __p2_32; \
- int16x8_t __ret_32; \
- __ret_32 = vsetq_lane_s16(vget_lane_s16(__s2_32, __p3_32), __s0_32, __p1_32); \
- __ret_32; \
- })
- #else
- #define vcopyq_lane_s16(__p0_33, __p1_33, __p2_33, __p3_33) __extension__ ({ \
- int16x8_t __s0_33 = __p0_33; \
- int16x4_t __s2_33 = __p2_33; \
- int16x8_t __rev0_33; __rev0_33 = __builtin_shufflevector(__s0_33, __s0_33, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x4_t __rev2_33; __rev2_33 = __builtin_shufflevector(__s2_33, __s2_33, 3, 2, 1, 0); \
- int16x8_t __ret_33; \
- __ret_33 = __noswap_vsetq_lane_s16(__noswap_vget_lane_s16(__rev2_33, __p3_33), __rev0_33, __p1_33); \
- __ret_33 = __builtin_shufflevector(__ret_33, __ret_33, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_33; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopy_lane_p8(__p0_34, __p1_34, __p2_34, __p3_34) __extension__ ({ \
- poly8x8_t __s0_34 = __p0_34; \
- poly8x8_t __s2_34 = __p2_34; \
- poly8x8_t __ret_34; \
- __ret_34 = vset_lane_p8(vget_lane_p8(__s2_34, __p3_34), __s0_34, __p1_34); \
- __ret_34; \
- })
- #else
- #define vcopy_lane_p8(__p0_35, __p1_35, __p2_35, __p3_35) __extension__ ({ \
- poly8x8_t __s0_35 = __p0_35; \
- poly8x8_t __s2_35 = __p2_35; \
- poly8x8_t __rev0_35; __rev0_35 = __builtin_shufflevector(__s0_35, __s0_35, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly8x8_t __rev2_35; __rev2_35 = __builtin_shufflevector(__s2_35, __s2_35, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly8x8_t __ret_35; \
- __ret_35 = __noswap_vset_lane_p8(__noswap_vget_lane_p8(__rev2_35, __p3_35), __rev0_35, __p1_35); \
- __ret_35 = __builtin_shufflevector(__ret_35, __ret_35, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_35; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopy_lane_p16(__p0_36, __p1_36, __p2_36, __p3_36) __extension__ ({ \
- poly16x4_t __s0_36 = __p0_36; \
- poly16x4_t __s2_36 = __p2_36; \
- poly16x4_t __ret_36; \
- __ret_36 = vset_lane_p16(vget_lane_p16(__s2_36, __p3_36), __s0_36, __p1_36); \
- __ret_36; \
- })
- #else
- #define vcopy_lane_p16(__p0_37, __p1_37, __p2_37, __p3_37) __extension__ ({ \
- poly16x4_t __s0_37 = __p0_37; \
- poly16x4_t __s2_37 = __p2_37; \
- poly16x4_t __rev0_37; __rev0_37 = __builtin_shufflevector(__s0_37, __s0_37, 3, 2, 1, 0); \
- poly16x4_t __rev2_37; __rev2_37 = __builtin_shufflevector(__s2_37, __s2_37, 3, 2, 1, 0); \
- poly16x4_t __ret_37; \
- __ret_37 = __noswap_vset_lane_p16(__noswap_vget_lane_p16(__rev2_37, __p3_37), __rev0_37, __p1_37); \
- __ret_37 = __builtin_shufflevector(__ret_37, __ret_37, 3, 2, 1, 0); \
- __ret_37; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopy_lane_u8(__p0_38, __p1_38, __p2_38, __p3_38) __extension__ ({ \
- uint8x8_t __s0_38 = __p0_38; \
- uint8x8_t __s2_38 = __p2_38; \
- uint8x8_t __ret_38; \
- __ret_38 = vset_lane_u8(vget_lane_u8(__s2_38, __p3_38), __s0_38, __p1_38); \
- __ret_38; \
- })
- #else
- #define vcopy_lane_u8(__p0_39, __p1_39, __p2_39, __p3_39) __extension__ ({ \
- uint8x8_t __s0_39 = __p0_39; \
- uint8x8_t __s2_39 = __p2_39; \
- uint8x8_t __rev0_39; __rev0_39 = __builtin_shufflevector(__s0_39, __s0_39, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x8_t __rev2_39; __rev2_39 = __builtin_shufflevector(__s2_39, __s2_39, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x8_t __ret_39; \
- __ret_39 = __noswap_vset_lane_u8(__noswap_vget_lane_u8(__rev2_39, __p3_39), __rev0_39, __p1_39); \
- __ret_39 = __builtin_shufflevector(__ret_39, __ret_39, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_39; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopy_lane_u32(__p0_40, __p1_40, __p2_40, __p3_40) __extension__ ({ \
- uint32x2_t __s0_40 = __p0_40; \
- uint32x2_t __s2_40 = __p2_40; \
- uint32x2_t __ret_40; \
- __ret_40 = vset_lane_u32(vget_lane_u32(__s2_40, __p3_40), __s0_40, __p1_40); \
- __ret_40; \
- })
- #else
- #define vcopy_lane_u32(__p0_41, __p1_41, __p2_41, __p3_41) __extension__ ({ \
- uint32x2_t __s0_41 = __p0_41; \
- uint32x2_t __s2_41 = __p2_41; \
- uint32x2_t __rev0_41; __rev0_41 = __builtin_shufflevector(__s0_41, __s0_41, 1, 0); \
- uint32x2_t __rev2_41; __rev2_41 = __builtin_shufflevector(__s2_41, __s2_41, 1, 0); \
- uint32x2_t __ret_41; \
- __ret_41 = __noswap_vset_lane_u32(__noswap_vget_lane_u32(__rev2_41, __p3_41), __rev0_41, __p1_41); \
- __ret_41 = __builtin_shufflevector(__ret_41, __ret_41, 1, 0); \
- __ret_41; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopy_lane_u64(__p0_42, __p1_42, __p2_42, __p3_42) __extension__ ({ \
- uint64x1_t __s0_42 = __p0_42; \
- uint64x1_t __s2_42 = __p2_42; \
- uint64x1_t __ret_42; \
- __ret_42 = vset_lane_u64(vget_lane_u64(__s2_42, __p3_42), __s0_42, __p1_42); \
- __ret_42; \
- })
- #else
- #define vcopy_lane_u64(__p0_43, __p1_43, __p2_43, __p3_43) __extension__ ({ \
- uint64x1_t __s0_43 = __p0_43; \
- uint64x1_t __s2_43 = __p2_43; \
- uint64x1_t __ret_43; \
- __ret_43 = __noswap_vset_lane_u64(__noswap_vget_lane_u64(__s2_43, __p3_43), __s0_43, __p1_43); \
- __ret_43; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopy_lane_u16(__p0_44, __p1_44, __p2_44, __p3_44) __extension__ ({ \
- uint16x4_t __s0_44 = __p0_44; \
- uint16x4_t __s2_44 = __p2_44; \
- uint16x4_t __ret_44; \
- __ret_44 = vset_lane_u16(vget_lane_u16(__s2_44, __p3_44), __s0_44, __p1_44); \
- __ret_44; \
- })
- #else
- #define vcopy_lane_u16(__p0_45, __p1_45, __p2_45, __p3_45) __extension__ ({ \
- uint16x4_t __s0_45 = __p0_45; \
- uint16x4_t __s2_45 = __p2_45; \
- uint16x4_t __rev0_45; __rev0_45 = __builtin_shufflevector(__s0_45, __s0_45, 3, 2, 1, 0); \
- uint16x4_t __rev2_45; __rev2_45 = __builtin_shufflevector(__s2_45, __s2_45, 3, 2, 1, 0); \
- uint16x4_t __ret_45; \
- __ret_45 = __noswap_vset_lane_u16(__noswap_vget_lane_u16(__rev2_45, __p3_45), __rev0_45, __p1_45); \
- __ret_45 = __builtin_shufflevector(__ret_45, __ret_45, 3, 2, 1, 0); \
- __ret_45; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopy_lane_s8(__p0_46, __p1_46, __p2_46, __p3_46) __extension__ ({ \
- int8x8_t __s0_46 = __p0_46; \
- int8x8_t __s2_46 = __p2_46; \
- int8x8_t __ret_46; \
- __ret_46 = vset_lane_s8(vget_lane_s8(__s2_46, __p3_46), __s0_46, __p1_46); \
- __ret_46; \
- })
- #else
- #define vcopy_lane_s8(__p0_47, __p1_47, __p2_47, __p3_47) __extension__ ({ \
- int8x8_t __s0_47 = __p0_47; \
- int8x8_t __s2_47 = __p2_47; \
- int8x8_t __rev0_47; __rev0_47 = __builtin_shufflevector(__s0_47, __s0_47, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x8_t __rev2_47; __rev2_47 = __builtin_shufflevector(__s2_47, __s2_47, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x8_t __ret_47; \
- __ret_47 = __noswap_vset_lane_s8(__noswap_vget_lane_s8(__rev2_47, __p3_47), __rev0_47, __p1_47); \
- __ret_47 = __builtin_shufflevector(__ret_47, __ret_47, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_47; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopy_lane_f32(__p0_48, __p1_48, __p2_48, __p3_48) __extension__ ({ \
- float32x2_t __s0_48 = __p0_48; \
- float32x2_t __s2_48 = __p2_48; \
- float32x2_t __ret_48; \
- __ret_48 = vset_lane_f32(vget_lane_f32(__s2_48, __p3_48), __s0_48, __p1_48); \
- __ret_48; \
- })
- #else
- #define vcopy_lane_f32(__p0_49, __p1_49, __p2_49, __p3_49) __extension__ ({ \
- float32x2_t __s0_49 = __p0_49; \
- float32x2_t __s2_49 = __p2_49; \
- float32x2_t __rev0_49; __rev0_49 = __builtin_shufflevector(__s0_49, __s0_49, 1, 0); \
- float32x2_t __rev2_49; __rev2_49 = __builtin_shufflevector(__s2_49, __s2_49, 1, 0); \
- float32x2_t __ret_49; \
- __ret_49 = __noswap_vset_lane_f32(__noswap_vget_lane_f32(__rev2_49, __p3_49), __rev0_49, __p1_49); \
- __ret_49 = __builtin_shufflevector(__ret_49, __ret_49, 1, 0); \
- __ret_49; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopy_lane_s32(__p0_50, __p1_50, __p2_50, __p3_50) __extension__ ({ \
- int32x2_t __s0_50 = __p0_50; \
- int32x2_t __s2_50 = __p2_50; \
- int32x2_t __ret_50; \
- __ret_50 = vset_lane_s32(vget_lane_s32(__s2_50, __p3_50), __s0_50, __p1_50); \
- __ret_50; \
- })
- #else
- #define vcopy_lane_s32(__p0_51, __p1_51, __p2_51, __p3_51) __extension__ ({ \
- int32x2_t __s0_51 = __p0_51; \
- int32x2_t __s2_51 = __p2_51; \
- int32x2_t __rev0_51; __rev0_51 = __builtin_shufflevector(__s0_51, __s0_51, 1, 0); \
- int32x2_t __rev2_51; __rev2_51 = __builtin_shufflevector(__s2_51, __s2_51, 1, 0); \
- int32x2_t __ret_51; \
- __ret_51 = __noswap_vset_lane_s32(__noswap_vget_lane_s32(__rev2_51, __p3_51), __rev0_51, __p1_51); \
- __ret_51 = __builtin_shufflevector(__ret_51, __ret_51, 1, 0); \
- __ret_51; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopy_lane_s64(__p0_52, __p1_52, __p2_52, __p3_52) __extension__ ({ \
- int64x1_t __s0_52 = __p0_52; \
- int64x1_t __s2_52 = __p2_52; \
- int64x1_t __ret_52; \
- __ret_52 = vset_lane_s64(vget_lane_s64(__s2_52, __p3_52), __s0_52, __p1_52); \
- __ret_52; \
- })
- #else
- #define vcopy_lane_s64(__p0_53, __p1_53, __p2_53, __p3_53) __extension__ ({ \
- int64x1_t __s0_53 = __p0_53; \
- int64x1_t __s2_53 = __p2_53; \
- int64x1_t __ret_53; \
- __ret_53 = __noswap_vset_lane_s64(__noswap_vget_lane_s64(__s2_53, __p3_53), __s0_53, __p1_53); \
- __ret_53; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopy_lane_s16(__p0_54, __p1_54, __p2_54, __p3_54) __extension__ ({ \
- int16x4_t __s0_54 = __p0_54; \
- int16x4_t __s2_54 = __p2_54; \
- int16x4_t __ret_54; \
- __ret_54 = vset_lane_s16(vget_lane_s16(__s2_54, __p3_54), __s0_54, __p1_54); \
- __ret_54; \
- })
- #else
- #define vcopy_lane_s16(__p0_55, __p1_55, __p2_55, __p3_55) __extension__ ({ \
- int16x4_t __s0_55 = __p0_55; \
- int16x4_t __s2_55 = __p2_55; \
- int16x4_t __rev0_55; __rev0_55 = __builtin_shufflevector(__s0_55, __s0_55, 3, 2, 1, 0); \
- int16x4_t __rev2_55; __rev2_55 = __builtin_shufflevector(__s2_55, __s2_55, 3, 2, 1, 0); \
- int16x4_t __ret_55; \
- __ret_55 = __noswap_vset_lane_s16(__noswap_vget_lane_s16(__rev2_55, __p3_55), __rev0_55, __p1_55); \
- __ret_55 = __builtin_shufflevector(__ret_55, __ret_55, 3, 2, 1, 0); \
- __ret_55; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopyq_laneq_p8(__p0_56, __p1_56, __p2_56, __p3_56) __extension__ ({ \
- poly8x16_t __s0_56 = __p0_56; \
- poly8x16_t __s2_56 = __p2_56; \
- poly8x16_t __ret_56; \
- __ret_56 = vsetq_lane_p8(vgetq_lane_p8(__s2_56, __p3_56), __s0_56, __p1_56); \
- __ret_56; \
- })
- #else
- #define vcopyq_laneq_p8(__p0_57, __p1_57, __p2_57, __p3_57) __extension__ ({ \
- poly8x16_t __s0_57 = __p0_57; \
- poly8x16_t __s2_57 = __p2_57; \
- poly8x16_t __rev0_57; __rev0_57 = __builtin_shufflevector(__s0_57, __s0_57, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly8x16_t __rev2_57; __rev2_57 = __builtin_shufflevector(__s2_57, __s2_57, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly8x16_t __ret_57; \
- __ret_57 = __noswap_vsetq_lane_p8(__noswap_vgetq_lane_p8(__rev2_57, __p3_57), __rev0_57, __p1_57); \
- __ret_57 = __builtin_shufflevector(__ret_57, __ret_57, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_57; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopyq_laneq_p16(__p0_58, __p1_58, __p2_58, __p3_58) __extension__ ({ \
- poly16x8_t __s0_58 = __p0_58; \
- poly16x8_t __s2_58 = __p2_58; \
- poly16x8_t __ret_58; \
- __ret_58 = vsetq_lane_p16(vgetq_lane_p16(__s2_58, __p3_58), __s0_58, __p1_58); \
- __ret_58; \
- })
- #else
- #define vcopyq_laneq_p16(__p0_59, __p1_59, __p2_59, __p3_59) __extension__ ({ \
- poly16x8_t __s0_59 = __p0_59; \
- poly16x8_t __s2_59 = __p2_59; \
- poly16x8_t __rev0_59; __rev0_59 = __builtin_shufflevector(__s0_59, __s0_59, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly16x8_t __rev2_59; __rev2_59 = __builtin_shufflevector(__s2_59, __s2_59, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly16x8_t __ret_59; \
- __ret_59 = __noswap_vsetq_lane_p16(__noswap_vgetq_lane_p16(__rev2_59, __p3_59), __rev0_59, __p1_59); \
- __ret_59 = __builtin_shufflevector(__ret_59, __ret_59, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_59; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopyq_laneq_u8(__p0_60, __p1_60, __p2_60, __p3_60) __extension__ ({ \
- uint8x16_t __s0_60 = __p0_60; \
- uint8x16_t __s2_60 = __p2_60; \
- uint8x16_t __ret_60; \
- __ret_60 = vsetq_lane_u8(vgetq_lane_u8(__s2_60, __p3_60), __s0_60, __p1_60); \
- __ret_60; \
- })
- #else
- #define vcopyq_laneq_u8(__p0_61, __p1_61, __p2_61, __p3_61) __extension__ ({ \
- uint8x16_t __s0_61 = __p0_61; \
- uint8x16_t __s2_61 = __p2_61; \
- uint8x16_t __rev0_61; __rev0_61 = __builtin_shufflevector(__s0_61, __s0_61, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x16_t __rev2_61; __rev2_61 = __builtin_shufflevector(__s2_61, __s2_61, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x16_t __ret_61; \
- __ret_61 = __noswap_vsetq_lane_u8(__noswap_vgetq_lane_u8(__rev2_61, __p3_61), __rev0_61, __p1_61); \
- __ret_61 = __builtin_shufflevector(__ret_61, __ret_61, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_61; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopyq_laneq_u32(__p0_62, __p1_62, __p2_62, __p3_62) __extension__ ({ \
- uint32x4_t __s0_62 = __p0_62; \
- uint32x4_t __s2_62 = __p2_62; \
- uint32x4_t __ret_62; \
- __ret_62 = vsetq_lane_u32(vgetq_lane_u32(__s2_62, __p3_62), __s0_62, __p1_62); \
- __ret_62; \
- })
- #else
- #define vcopyq_laneq_u32(__p0_63, __p1_63, __p2_63, __p3_63) __extension__ ({ \
- uint32x4_t __s0_63 = __p0_63; \
- uint32x4_t __s2_63 = __p2_63; \
- uint32x4_t __rev0_63; __rev0_63 = __builtin_shufflevector(__s0_63, __s0_63, 3, 2, 1, 0); \
- uint32x4_t __rev2_63; __rev2_63 = __builtin_shufflevector(__s2_63, __s2_63, 3, 2, 1, 0); \
- uint32x4_t __ret_63; \
- __ret_63 = __noswap_vsetq_lane_u32(__noswap_vgetq_lane_u32(__rev2_63, __p3_63), __rev0_63, __p1_63); \
- __ret_63 = __builtin_shufflevector(__ret_63, __ret_63, 3, 2, 1, 0); \
- __ret_63; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopyq_laneq_u64(__p0_64, __p1_64, __p2_64, __p3_64) __extension__ ({ \
- uint64x2_t __s0_64 = __p0_64; \
- uint64x2_t __s2_64 = __p2_64; \
- uint64x2_t __ret_64; \
- __ret_64 = vsetq_lane_u64(vgetq_lane_u64(__s2_64, __p3_64), __s0_64, __p1_64); \
- __ret_64; \
- })
- #else
- #define vcopyq_laneq_u64(__p0_65, __p1_65, __p2_65, __p3_65) __extension__ ({ \
- uint64x2_t __s0_65 = __p0_65; \
- uint64x2_t __s2_65 = __p2_65; \
- uint64x2_t __rev0_65; __rev0_65 = __builtin_shufflevector(__s0_65, __s0_65, 1, 0); \
- uint64x2_t __rev2_65; __rev2_65 = __builtin_shufflevector(__s2_65, __s2_65, 1, 0); \
- uint64x2_t __ret_65; \
- __ret_65 = __noswap_vsetq_lane_u64(__noswap_vgetq_lane_u64(__rev2_65, __p3_65), __rev0_65, __p1_65); \
- __ret_65 = __builtin_shufflevector(__ret_65, __ret_65, 1, 0); \
- __ret_65; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopyq_laneq_u16(__p0_66, __p1_66, __p2_66, __p3_66) __extension__ ({ \
- uint16x8_t __s0_66 = __p0_66; \
- uint16x8_t __s2_66 = __p2_66; \
- uint16x8_t __ret_66; \
- __ret_66 = vsetq_lane_u16(vgetq_lane_u16(__s2_66, __p3_66), __s0_66, __p1_66); \
- __ret_66; \
- })
- #else
- #define vcopyq_laneq_u16(__p0_67, __p1_67, __p2_67, __p3_67) __extension__ ({ \
- uint16x8_t __s0_67 = __p0_67; \
- uint16x8_t __s2_67 = __p2_67; \
- uint16x8_t __rev0_67; __rev0_67 = __builtin_shufflevector(__s0_67, __s0_67, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __rev2_67; __rev2_67 = __builtin_shufflevector(__s2_67, __s2_67, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __ret_67; \
- __ret_67 = __noswap_vsetq_lane_u16(__noswap_vgetq_lane_u16(__rev2_67, __p3_67), __rev0_67, __p1_67); \
- __ret_67 = __builtin_shufflevector(__ret_67, __ret_67, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_67; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopyq_laneq_s8(__p0_68, __p1_68, __p2_68, __p3_68) __extension__ ({ \
- int8x16_t __s0_68 = __p0_68; \
- int8x16_t __s2_68 = __p2_68; \
- int8x16_t __ret_68; \
- __ret_68 = vsetq_lane_s8(vgetq_lane_s8(__s2_68, __p3_68), __s0_68, __p1_68); \
- __ret_68; \
- })
- #else
- #define vcopyq_laneq_s8(__p0_69, __p1_69, __p2_69, __p3_69) __extension__ ({ \
- int8x16_t __s0_69 = __p0_69; \
- int8x16_t __s2_69 = __p2_69; \
- int8x16_t __rev0_69; __rev0_69 = __builtin_shufflevector(__s0_69, __s0_69, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x16_t __rev2_69; __rev2_69 = __builtin_shufflevector(__s2_69, __s2_69, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x16_t __ret_69; \
- __ret_69 = __noswap_vsetq_lane_s8(__noswap_vgetq_lane_s8(__rev2_69, __p3_69), __rev0_69, __p1_69); \
- __ret_69 = __builtin_shufflevector(__ret_69, __ret_69, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_69; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopyq_laneq_f32(__p0_70, __p1_70, __p2_70, __p3_70) __extension__ ({ \
- float32x4_t __s0_70 = __p0_70; \
- float32x4_t __s2_70 = __p2_70; \
- float32x4_t __ret_70; \
- __ret_70 = vsetq_lane_f32(vgetq_lane_f32(__s2_70, __p3_70), __s0_70, __p1_70); \
- __ret_70; \
- })
- #else
- #define vcopyq_laneq_f32(__p0_71, __p1_71, __p2_71, __p3_71) __extension__ ({ \
- float32x4_t __s0_71 = __p0_71; \
- float32x4_t __s2_71 = __p2_71; \
- float32x4_t __rev0_71; __rev0_71 = __builtin_shufflevector(__s0_71, __s0_71, 3, 2, 1, 0); \
- float32x4_t __rev2_71; __rev2_71 = __builtin_shufflevector(__s2_71, __s2_71, 3, 2, 1, 0); \
- float32x4_t __ret_71; \
- __ret_71 = __noswap_vsetq_lane_f32(__noswap_vgetq_lane_f32(__rev2_71, __p3_71), __rev0_71, __p1_71); \
- __ret_71 = __builtin_shufflevector(__ret_71, __ret_71, 3, 2, 1, 0); \
- __ret_71; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopyq_laneq_s32(__p0_72, __p1_72, __p2_72, __p3_72) __extension__ ({ \
- int32x4_t __s0_72 = __p0_72; \
- int32x4_t __s2_72 = __p2_72; \
- int32x4_t __ret_72; \
- __ret_72 = vsetq_lane_s32(vgetq_lane_s32(__s2_72, __p3_72), __s0_72, __p1_72); \
- __ret_72; \
- })
- #else
- #define vcopyq_laneq_s32(__p0_73, __p1_73, __p2_73, __p3_73) __extension__ ({ \
- int32x4_t __s0_73 = __p0_73; \
- int32x4_t __s2_73 = __p2_73; \
- int32x4_t __rev0_73; __rev0_73 = __builtin_shufflevector(__s0_73, __s0_73, 3, 2, 1, 0); \
- int32x4_t __rev2_73; __rev2_73 = __builtin_shufflevector(__s2_73, __s2_73, 3, 2, 1, 0); \
- int32x4_t __ret_73; \
- __ret_73 = __noswap_vsetq_lane_s32(__noswap_vgetq_lane_s32(__rev2_73, __p3_73), __rev0_73, __p1_73); \
- __ret_73 = __builtin_shufflevector(__ret_73, __ret_73, 3, 2, 1, 0); \
- __ret_73; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopyq_laneq_s64(__p0_74, __p1_74, __p2_74, __p3_74) __extension__ ({ \
- int64x2_t __s0_74 = __p0_74; \
- int64x2_t __s2_74 = __p2_74; \
- int64x2_t __ret_74; \
- __ret_74 = vsetq_lane_s64(vgetq_lane_s64(__s2_74, __p3_74), __s0_74, __p1_74); \
- __ret_74; \
- })
- #else
- #define vcopyq_laneq_s64(__p0_75, __p1_75, __p2_75, __p3_75) __extension__ ({ \
- int64x2_t __s0_75 = __p0_75; \
- int64x2_t __s2_75 = __p2_75; \
- int64x2_t __rev0_75; __rev0_75 = __builtin_shufflevector(__s0_75, __s0_75, 1, 0); \
- int64x2_t __rev2_75; __rev2_75 = __builtin_shufflevector(__s2_75, __s2_75, 1, 0); \
- int64x2_t __ret_75; \
- __ret_75 = __noswap_vsetq_lane_s64(__noswap_vgetq_lane_s64(__rev2_75, __p3_75), __rev0_75, __p1_75); \
- __ret_75 = __builtin_shufflevector(__ret_75, __ret_75, 1, 0); \
- __ret_75; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopyq_laneq_s16(__p0_76, __p1_76, __p2_76, __p3_76) __extension__ ({ \
- int16x8_t __s0_76 = __p0_76; \
- int16x8_t __s2_76 = __p2_76; \
- int16x8_t __ret_76; \
- __ret_76 = vsetq_lane_s16(vgetq_lane_s16(__s2_76, __p3_76), __s0_76, __p1_76); \
- __ret_76; \
- })
- #else
- #define vcopyq_laneq_s16(__p0_77, __p1_77, __p2_77, __p3_77) __extension__ ({ \
- int16x8_t __s0_77 = __p0_77; \
- int16x8_t __s2_77 = __p2_77; \
- int16x8_t __rev0_77; __rev0_77 = __builtin_shufflevector(__s0_77, __s0_77, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __rev2_77; __rev2_77 = __builtin_shufflevector(__s2_77, __s2_77, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __ret_77; \
- __ret_77 = __noswap_vsetq_lane_s16(__noswap_vgetq_lane_s16(__rev2_77, __p3_77), __rev0_77, __p1_77); \
- __ret_77 = __builtin_shufflevector(__ret_77, __ret_77, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_77; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopy_laneq_p8(__p0_78, __p1_78, __p2_78, __p3_78) __extension__ ({ \
- poly8x8_t __s0_78 = __p0_78; \
- poly8x16_t __s2_78 = __p2_78; \
- poly8x8_t __ret_78; \
- __ret_78 = vset_lane_p8(vgetq_lane_p8(__s2_78, __p3_78), __s0_78, __p1_78); \
- __ret_78; \
- })
- #else
- #define vcopy_laneq_p8(__p0_79, __p1_79, __p2_79, __p3_79) __extension__ ({ \
- poly8x8_t __s0_79 = __p0_79; \
- poly8x16_t __s2_79 = __p2_79; \
- poly8x8_t __rev0_79; __rev0_79 = __builtin_shufflevector(__s0_79, __s0_79, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly8x16_t __rev2_79; __rev2_79 = __builtin_shufflevector(__s2_79, __s2_79, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly8x8_t __ret_79; \
- __ret_79 = __noswap_vset_lane_p8(__noswap_vgetq_lane_p8(__rev2_79, __p3_79), __rev0_79, __p1_79); \
- __ret_79 = __builtin_shufflevector(__ret_79, __ret_79, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_79; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopy_laneq_p16(__p0_80, __p1_80, __p2_80, __p3_80) __extension__ ({ \
- poly16x4_t __s0_80 = __p0_80; \
- poly16x8_t __s2_80 = __p2_80; \
- poly16x4_t __ret_80; \
- __ret_80 = vset_lane_p16(vgetq_lane_p16(__s2_80, __p3_80), __s0_80, __p1_80); \
- __ret_80; \
- })
- #else
- #define vcopy_laneq_p16(__p0_81, __p1_81, __p2_81, __p3_81) __extension__ ({ \
- poly16x4_t __s0_81 = __p0_81; \
- poly16x8_t __s2_81 = __p2_81; \
- poly16x4_t __rev0_81; __rev0_81 = __builtin_shufflevector(__s0_81, __s0_81, 3, 2, 1, 0); \
- poly16x8_t __rev2_81; __rev2_81 = __builtin_shufflevector(__s2_81, __s2_81, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly16x4_t __ret_81; \
- __ret_81 = __noswap_vset_lane_p16(__noswap_vgetq_lane_p16(__rev2_81, __p3_81), __rev0_81, __p1_81); \
- __ret_81 = __builtin_shufflevector(__ret_81, __ret_81, 3, 2, 1, 0); \
- __ret_81; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopy_laneq_u8(__p0_82, __p1_82, __p2_82, __p3_82) __extension__ ({ \
- uint8x8_t __s0_82 = __p0_82; \
- uint8x16_t __s2_82 = __p2_82; \
- uint8x8_t __ret_82; \
- __ret_82 = vset_lane_u8(vgetq_lane_u8(__s2_82, __p3_82), __s0_82, __p1_82); \
- __ret_82; \
- })
- #else
- #define vcopy_laneq_u8(__p0_83, __p1_83, __p2_83, __p3_83) __extension__ ({ \
- uint8x8_t __s0_83 = __p0_83; \
- uint8x16_t __s2_83 = __p2_83; \
- uint8x8_t __rev0_83; __rev0_83 = __builtin_shufflevector(__s0_83, __s0_83, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x16_t __rev2_83; __rev2_83 = __builtin_shufflevector(__s2_83, __s2_83, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x8_t __ret_83; \
- __ret_83 = __noswap_vset_lane_u8(__noswap_vgetq_lane_u8(__rev2_83, __p3_83), __rev0_83, __p1_83); \
- __ret_83 = __builtin_shufflevector(__ret_83, __ret_83, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_83; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopy_laneq_u32(__p0_84, __p1_84, __p2_84, __p3_84) __extension__ ({ \
- uint32x2_t __s0_84 = __p0_84; \
- uint32x4_t __s2_84 = __p2_84; \
- uint32x2_t __ret_84; \
- __ret_84 = vset_lane_u32(vgetq_lane_u32(__s2_84, __p3_84), __s0_84, __p1_84); \
- __ret_84; \
- })
- #else
- #define vcopy_laneq_u32(__p0_85, __p1_85, __p2_85, __p3_85) __extension__ ({ \
- uint32x2_t __s0_85 = __p0_85; \
- uint32x4_t __s2_85 = __p2_85; \
- uint32x2_t __rev0_85; __rev0_85 = __builtin_shufflevector(__s0_85, __s0_85, 1, 0); \
- uint32x4_t __rev2_85; __rev2_85 = __builtin_shufflevector(__s2_85, __s2_85, 3, 2, 1, 0); \
- uint32x2_t __ret_85; \
- __ret_85 = __noswap_vset_lane_u32(__noswap_vgetq_lane_u32(__rev2_85, __p3_85), __rev0_85, __p1_85); \
- __ret_85 = __builtin_shufflevector(__ret_85, __ret_85, 1, 0); \
- __ret_85; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopy_laneq_u64(__p0_86, __p1_86, __p2_86, __p3_86) __extension__ ({ \
- uint64x1_t __s0_86 = __p0_86; \
- uint64x2_t __s2_86 = __p2_86; \
- uint64x1_t __ret_86; \
- __ret_86 = vset_lane_u64(vgetq_lane_u64(__s2_86, __p3_86), __s0_86, __p1_86); \
- __ret_86; \
- })
- #else
- #define vcopy_laneq_u64(__p0_87, __p1_87, __p2_87, __p3_87) __extension__ ({ \
- uint64x1_t __s0_87 = __p0_87; \
- uint64x2_t __s2_87 = __p2_87; \
- uint64x2_t __rev2_87; __rev2_87 = __builtin_shufflevector(__s2_87, __s2_87, 1, 0); \
- uint64x1_t __ret_87; \
- __ret_87 = __noswap_vset_lane_u64(__noswap_vgetq_lane_u64(__rev2_87, __p3_87), __s0_87, __p1_87); \
- __ret_87; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopy_laneq_u16(__p0_88, __p1_88, __p2_88, __p3_88) __extension__ ({ \
- uint16x4_t __s0_88 = __p0_88; \
- uint16x8_t __s2_88 = __p2_88; \
- uint16x4_t __ret_88; \
- __ret_88 = vset_lane_u16(vgetq_lane_u16(__s2_88, __p3_88), __s0_88, __p1_88); \
- __ret_88; \
- })
- #else
- #define vcopy_laneq_u16(__p0_89, __p1_89, __p2_89, __p3_89) __extension__ ({ \
- uint16x4_t __s0_89 = __p0_89; \
- uint16x8_t __s2_89 = __p2_89; \
- uint16x4_t __rev0_89; __rev0_89 = __builtin_shufflevector(__s0_89, __s0_89, 3, 2, 1, 0); \
- uint16x8_t __rev2_89; __rev2_89 = __builtin_shufflevector(__s2_89, __s2_89, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x4_t __ret_89; \
- __ret_89 = __noswap_vset_lane_u16(__noswap_vgetq_lane_u16(__rev2_89, __p3_89), __rev0_89, __p1_89); \
- __ret_89 = __builtin_shufflevector(__ret_89, __ret_89, 3, 2, 1, 0); \
- __ret_89; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopy_laneq_s8(__p0_90, __p1_90, __p2_90, __p3_90) __extension__ ({ \
- int8x8_t __s0_90 = __p0_90; \
- int8x16_t __s2_90 = __p2_90; \
- int8x8_t __ret_90; \
- __ret_90 = vset_lane_s8(vgetq_lane_s8(__s2_90, __p3_90), __s0_90, __p1_90); \
- __ret_90; \
- })
- #else
- #define vcopy_laneq_s8(__p0_91, __p1_91, __p2_91, __p3_91) __extension__ ({ \
- int8x8_t __s0_91 = __p0_91; \
- int8x16_t __s2_91 = __p2_91; \
- int8x8_t __rev0_91; __rev0_91 = __builtin_shufflevector(__s0_91, __s0_91, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x16_t __rev2_91; __rev2_91 = __builtin_shufflevector(__s2_91, __s2_91, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x8_t __ret_91; \
- __ret_91 = __noswap_vset_lane_s8(__noswap_vgetq_lane_s8(__rev2_91, __p3_91), __rev0_91, __p1_91); \
- __ret_91 = __builtin_shufflevector(__ret_91, __ret_91, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_91; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopy_laneq_f32(__p0_92, __p1_92, __p2_92, __p3_92) __extension__ ({ \
- float32x2_t __s0_92 = __p0_92; \
- float32x4_t __s2_92 = __p2_92; \
- float32x2_t __ret_92; \
- __ret_92 = vset_lane_f32(vgetq_lane_f32(__s2_92, __p3_92), __s0_92, __p1_92); \
- __ret_92; \
- })
- #else
- #define vcopy_laneq_f32(__p0_93, __p1_93, __p2_93, __p3_93) __extension__ ({ \
- float32x2_t __s0_93 = __p0_93; \
- float32x4_t __s2_93 = __p2_93; \
- float32x2_t __rev0_93; __rev0_93 = __builtin_shufflevector(__s0_93, __s0_93, 1, 0); \
- float32x4_t __rev2_93; __rev2_93 = __builtin_shufflevector(__s2_93, __s2_93, 3, 2, 1, 0); \
- float32x2_t __ret_93; \
- __ret_93 = __noswap_vset_lane_f32(__noswap_vgetq_lane_f32(__rev2_93, __p3_93), __rev0_93, __p1_93); \
- __ret_93 = __builtin_shufflevector(__ret_93, __ret_93, 1, 0); \
- __ret_93; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopy_laneq_s32(__p0_94, __p1_94, __p2_94, __p3_94) __extension__ ({ \
- int32x2_t __s0_94 = __p0_94; \
- int32x4_t __s2_94 = __p2_94; \
- int32x2_t __ret_94; \
- __ret_94 = vset_lane_s32(vgetq_lane_s32(__s2_94, __p3_94), __s0_94, __p1_94); \
- __ret_94; \
- })
- #else
- #define vcopy_laneq_s32(__p0_95, __p1_95, __p2_95, __p3_95) __extension__ ({ \
- int32x2_t __s0_95 = __p0_95; \
- int32x4_t __s2_95 = __p2_95; \
- int32x2_t __rev0_95; __rev0_95 = __builtin_shufflevector(__s0_95, __s0_95, 1, 0); \
- int32x4_t __rev2_95; __rev2_95 = __builtin_shufflevector(__s2_95, __s2_95, 3, 2, 1, 0); \
- int32x2_t __ret_95; \
- __ret_95 = __noswap_vset_lane_s32(__noswap_vgetq_lane_s32(__rev2_95, __p3_95), __rev0_95, __p1_95); \
- __ret_95 = __builtin_shufflevector(__ret_95, __ret_95, 1, 0); \
- __ret_95; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopy_laneq_s64(__p0_96, __p1_96, __p2_96, __p3_96) __extension__ ({ \
- int64x1_t __s0_96 = __p0_96; \
- int64x2_t __s2_96 = __p2_96; \
- int64x1_t __ret_96; \
- __ret_96 = vset_lane_s64(vgetq_lane_s64(__s2_96, __p3_96), __s0_96, __p1_96); \
- __ret_96; \
- })
- #else
- #define vcopy_laneq_s64(__p0_97, __p1_97, __p2_97, __p3_97) __extension__ ({ \
- int64x1_t __s0_97 = __p0_97; \
- int64x2_t __s2_97 = __p2_97; \
- int64x2_t __rev2_97; __rev2_97 = __builtin_shufflevector(__s2_97, __s2_97, 1, 0); \
- int64x1_t __ret_97; \
- __ret_97 = __noswap_vset_lane_s64(__noswap_vgetq_lane_s64(__rev2_97, __p3_97), __s0_97, __p1_97); \
- __ret_97; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopy_laneq_s16(__p0_98, __p1_98, __p2_98, __p3_98) __extension__ ({ \
- int16x4_t __s0_98 = __p0_98; \
- int16x8_t __s2_98 = __p2_98; \
- int16x4_t __ret_98; \
- __ret_98 = vset_lane_s16(vgetq_lane_s16(__s2_98, __p3_98), __s0_98, __p1_98); \
- __ret_98; \
- })
- #else
- #define vcopy_laneq_s16(__p0_99, __p1_99, __p2_99, __p3_99) __extension__ ({ \
- int16x4_t __s0_99 = __p0_99; \
- int16x8_t __s2_99 = __p2_99; \
- int16x4_t __rev0_99; __rev0_99 = __builtin_shufflevector(__s0_99, __s0_99, 3, 2, 1, 0); \
- int16x8_t __rev2_99; __rev2_99 = __builtin_shufflevector(__s2_99, __s2_99, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x4_t __ret_99; \
- __ret_99 = __noswap_vset_lane_s16(__noswap_vgetq_lane_s16(__rev2_99, __p3_99), __rev0_99, __p1_99); \
- __ret_99 = __builtin_shufflevector(__ret_99, __ret_99, 3, 2, 1, 0); \
- __ret_99; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x1_t vcreate_p64(uint64_t __p0) {
- poly64x1_t __ret;
- __ret = (poly64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai poly64x1_t vcreate_p64(uint64_t __p0) {
- poly64x1_t __ret;
- __ret = (poly64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vcreate_f64(uint64_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t)(__p0);
- return __ret;
- }
- #else
- __ai float64x1_t vcreate_f64(uint64_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t)(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32_t vcvts_f32_s32(int32_t __p0) {
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vcvts_f32_s32(__p0);
- return __ret;
- }
- #else
- __ai float32_t vcvts_f32_s32(int32_t __p0) {
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vcvts_f32_s32(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32_t vcvts_f32_u32(uint32_t __p0) {
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vcvts_f32_u32(__p0);
- return __ret;
- }
- #else
- __ai float32_t vcvts_f32_u32(uint32_t __p0) {
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vcvts_f32_u32(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vcvt_f32_f64(float64x2_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9);
- return __ret;
- }
- #else
- __ai float32x2_t vcvt_f32_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__rev0, 9);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai float32x2_t __noswap_vcvt_f32_f64(float64x2_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64_t vcvtd_f64_s64(int64_t __p0) {
- float64_t __ret;
- __ret = (float64_t) __builtin_neon_vcvtd_f64_s64(__p0);
- return __ret;
- }
- #else
- __ai float64_t vcvtd_f64_s64(int64_t __p0) {
- float64_t __ret;
- __ret = (float64_t) __builtin_neon_vcvtd_f64_s64(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64_t vcvtd_f64_u64(uint64_t __p0) {
- float64_t __ret;
- __ret = (float64_t) __builtin_neon_vcvtd_f64_u64(__p0);
- return __ret;
- }
- #else
- __ai float64_t vcvtd_f64_u64(uint64_t __p0) {
- float64_t __ret;
- __ret = (float64_t) __builtin_neon_vcvtd_f64_u64(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 51);
- return __ret;
- }
- #else
- __ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 51);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 35);
- return __ret;
- }
- #else
- __ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 35);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vcvt_f64_u64(uint64x1_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 19);
- return __ret;
- }
- #else
- __ai float64x1_t vcvt_f64_u64(uint64x1_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 19);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vcvt_f64_s64(int64x1_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 3);
- return __ret;
- }
- #else
- __ai float64x1_t vcvt_f64_s64(int64x1_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 3);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vcvt_f64_f32(float32x2_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42);
- return __ret;
- }
- #else
- __ai float64x2_t vcvt_f64_f32(float32x2_t __p0) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__rev0, 42);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai float64x2_t __noswap_vcvt_f64_f32(float32x2_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) {
- float16x8_t __ret;
- __ret = vcombine_f16(__p0, vcvt_f16_f32(__p1));
- return __ret;
- }
- #else
- __ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) {
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float16x8_t __ret;
- __ret = __noswap_vcombine_f16(__rev0, __noswap_vcvt_f16_f32(__rev1));
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) {
- float32x4_t __ret;
- __ret = vcvt_f32_f16(vget_high_f16(__p0));
- return __ret;
- }
- #else
- __ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) {
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = __noswap_vcvt_f32_f16(__noswap_vget_high_f16(__rev0));
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
- float32x4_t __ret;
- __ret = vcombine_f32(__p0, vcvt_f32_f64(__p1));
- return __ret;
- }
- #else
- __ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float32x4_t __ret;
- __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvt_f32_f64(__rev1));
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) {
- float64x2_t __ret;
- __ret = vcvt_f64_f32(vget_high_f32(__p0));
- return __ret;
- }
- #else
- __ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float64x2_t __ret;
- __ret = __noswap_vcvt_f64_f32(__noswap_vget_high_f32(__rev0));
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcvts_n_f32_u32(__p0, __p1) __extension__ ({ \
- uint32_t __s0 = __p0; \
- float32_t __ret; \
- __ret = (float32_t) __builtin_neon_vcvts_n_f32_u32(__s0, __p1); \
- __ret; \
- })
- #else
- #define vcvts_n_f32_u32(__p0, __p1) __extension__ ({ \
- uint32_t __s0 = __p0; \
- float32_t __ret; \
- __ret = (float32_t) __builtin_neon_vcvts_n_f32_u32(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcvts_n_f32_s32(__p0, __p1) __extension__ ({ \
- int32_t __s0 = __p0; \
- float32_t __ret; \
- __ret = (float32_t) __builtin_neon_vcvts_n_f32_s32(__s0, __p1); \
- __ret; \
- })
- #else
- #define vcvts_n_f32_s32(__p0, __p1) __extension__ ({ \
- int32_t __s0 = __p0; \
- float32_t __ret; \
- __ret = (float32_t) __builtin_neon_vcvts_n_f32_s32(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- float64x2_t __ret; \
- __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 51); \
- __ret; \
- })
- #else
- #define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- float64x2_t __ret; \
- __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 51); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- float64x2_t __ret; \
- __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 35); \
- __ret; \
- })
- #else
- #define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- float64x2_t __ret; \
- __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 35); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcvt_n_f64_u64(__p0, __p1) __extension__ ({ \
- uint64x1_t __s0 = __p0; \
- float64x1_t __ret; \
- __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 19); \
- __ret; \
- })
- #else
- #define vcvt_n_f64_u64(__p0, __p1) __extension__ ({ \
- uint64x1_t __s0 = __p0; \
- float64x1_t __ret; \
- __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 19); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcvt_n_f64_s64(__p0, __p1) __extension__ ({ \
- int64x1_t __s0 = __p0; \
- float64x1_t __ret; \
- __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 3); \
- __ret; \
- })
- #else
- #define vcvt_n_f64_s64(__p0, __p1) __extension__ ({ \
- int64x1_t __s0 = __p0; \
- float64x1_t __ret; \
- __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 3); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcvtd_n_f64_u64(__p0, __p1) __extension__ ({ \
- uint64_t __s0 = __p0; \
- float64_t __ret; \
- __ret = (float64_t) __builtin_neon_vcvtd_n_f64_u64(__s0, __p1); \
- __ret; \
- })
- #else
- #define vcvtd_n_f64_u64(__p0, __p1) __extension__ ({ \
- uint64_t __s0 = __p0; \
- float64_t __ret; \
- __ret = (float64_t) __builtin_neon_vcvtd_n_f64_u64(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcvtd_n_f64_s64(__p0, __p1) __extension__ ({ \
- int64_t __s0 = __p0; \
- float64_t __ret; \
- __ret = (float64_t) __builtin_neon_vcvtd_n_f64_s64(__s0, __p1); \
- __ret; \
- })
- #else
- #define vcvtd_n_f64_s64(__p0, __p1) __extension__ ({ \
- int64_t __s0 = __p0; \
- float64_t __ret; \
- __ret = (float64_t) __builtin_neon_vcvtd_n_f64_s64(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcvts_n_s32_f32(__p0, __p1) __extension__ ({ \
- float32_t __s0 = __p0; \
- int32_t __ret; \
- __ret = (int32_t) __builtin_neon_vcvts_n_s32_f32(__s0, __p1); \
- __ret; \
- })
- #else
- #define vcvts_n_s32_f32(__p0, __p1) __extension__ ({ \
- float32_t __s0 = __p0; \
- int32_t __ret; \
- __ret = (int32_t) __builtin_neon_vcvts_n_s32_f32(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \
- float64x2_t __s0 = __p0; \
- int64x2_t __ret; \
- __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__s0, __p1, 35); \
- __ret; \
- })
- #else
- #define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \
- float64x2_t __s0 = __p0; \
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int64x2_t __ret; \
- __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__rev0, __p1, 35); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcvt_n_s64_f64(__p0, __p1) __extension__ ({ \
- float64x1_t __s0 = __p0; \
- int64x1_t __ret; \
- __ret = (int64x1_t) __builtin_neon_vcvt_n_s64_v((int8x8_t)__s0, __p1, 3); \
- __ret; \
- })
- #else
- #define vcvt_n_s64_f64(__p0, __p1) __extension__ ({ \
- float64x1_t __s0 = __p0; \
- int64x1_t __ret; \
- __ret = (int64x1_t) __builtin_neon_vcvt_n_s64_v((int8x8_t)__s0, __p1, 3); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcvtd_n_s64_f64(__p0, __p1) __extension__ ({ \
- float64_t __s0 = __p0; \
- int64_t __ret; \
- __ret = (int64_t) __builtin_neon_vcvtd_n_s64_f64(__s0, __p1); \
- __ret; \
- })
- #else
- #define vcvtd_n_s64_f64(__p0, __p1) __extension__ ({ \
- float64_t __s0 = __p0; \
- int64_t __ret; \
- __ret = (int64_t) __builtin_neon_vcvtd_n_s64_f64(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcvts_n_u32_f32(__p0, __p1) __extension__ ({ \
- float32_t __s0 = __p0; \
- uint32_t __ret; \
- __ret = (uint32_t) __builtin_neon_vcvts_n_u32_f32(__s0, __p1); \
- __ret; \
- })
- #else
- #define vcvts_n_u32_f32(__p0, __p1) __extension__ ({ \
- float32_t __s0 = __p0; \
- uint32_t __ret; \
- __ret = (uint32_t) __builtin_neon_vcvts_n_u32_f32(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \
- float64x2_t __s0 = __p0; \
- uint64x2_t __ret; \
- __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__s0, __p1, 51); \
- __ret; \
- })
- #else
- #define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \
- float64x2_t __s0 = __p0; \
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint64x2_t __ret; \
- __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__rev0, __p1, 51); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcvt_n_u64_f64(__p0, __p1) __extension__ ({ \
- float64x1_t __s0 = __p0; \
- uint64x1_t __ret; \
- __ret = (uint64x1_t) __builtin_neon_vcvt_n_u64_v((int8x8_t)__s0, __p1, 19); \
- __ret; \
- })
- #else
- #define vcvt_n_u64_f64(__p0, __p1) __extension__ ({ \
- float64x1_t __s0 = __p0; \
- uint64x1_t __ret; \
- __ret = (uint64x1_t) __builtin_neon_vcvt_n_u64_v((int8x8_t)__s0, __p1, 19); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcvtd_n_u64_f64(__p0, __p1) __extension__ ({ \
- float64_t __s0 = __p0; \
- uint64_t __ret; \
- __ret = (uint64_t) __builtin_neon_vcvtd_n_u64_f64(__s0, __p1); \
- __ret; \
- })
- #else
- #define vcvtd_n_u64_f64(__p0, __p1) __extension__ ({ \
- float64_t __s0 = __p0; \
- uint64_t __ret; \
- __ret = (uint64_t) __builtin_neon_vcvtd_n_u64_f64(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32_t vcvts_s32_f32(float32_t __p0) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vcvts_s32_f32(__p0);
- return __ret;
- }
- #else
- __ai int32_t vcvts_s32_f32(float32_t __p0) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vcvts_s32_f32(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64_t vcvtd_s64_f64(float64_t __p0) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vcvtd_s64_f64(__p0);
- return __ret;
- }
- #else
- __ai int64_t vcvtd_s64_f64(float64_t __p0) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vcvtd_s64_f64(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__p0, 35);
- return __ret;
- }
- #else
- __ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__rev0, 35);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vcvt_s64_f64(float64x1_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t) __builtin_neon_vcvt_s64_v((int8x8_t)__p0, 3);
- return __ret;
- }
- #else
- __ai int64x1_t vcvt_s64_f64(float64x1_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t) __builtin_neon_vcvt_s64_v((int8x8_t)__p0, 3);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32_t vcvts_u32_f32(float32_t __p0) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vcvts_u32_f32(__p0);
- return __ret;
- }
- #else
- __ai uint32_t vcvts_u32_f32(float32_t __p0) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vcvts_u32_f32(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64_t vcvtd_u64_f64(float64_t __p0) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcvtd_u64_f64(__p0);
- return __ret;
- }
- #else
- __ai uint64_t vcvtd_u64_f64(float64_t __p0) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcvtd_u64_f64(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__p0, 51);
- return __ret;
- }
- #else
- __ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__rev0, 51);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vcvt_u64_f64(float64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vcvt_u64_v((int8x8_t)__p0, 19);
- return __ret;
- }
- #else
- __ai uint64x1_t vcvt_u64_f64(float64x1_t __p0) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vcvt_u64_v((int8x8_t)__p0, 19);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32_t vcvtas_s32_f32(float32_t __p0) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vcvtas_s32_f32(__p0);
- return __ret;
- }
- #else
- __ai int32_t vcvtas_s32_f32(float32_t __p0) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vcvtas_s32_f32(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64_t vcvtad_s64_f64(float64_t __p0) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vcvtad_s64_f64(__p0);
- return __ret;
- }
- #else
- __ai int64_t vcvtad_s64_f64(float64_t __p0) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vcvtad_s64_f64(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32_t vcvtas_u32_f32(float32_t __p0) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vcvtas_u32_f32(__p0);
- return __ret;
- }
- #else
- __ai uint32_t vcvtas_u32_f32(float32_t __p0) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vcvtas_u32_f32(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64_t vcvtad_u64_f64(float64_t __p0) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcvtad_u64_f64(__p0);
- return __ret;
- }
- #else
- __ai uint64_t vcvtad_u64_f64(float64_t __p0) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcvtad_u64_f64(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32_t vcvtms_s32_f32(float32_t __p0) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vcvtms_s32_f32(__p0);
- return __ret;
- }
- #else
- __ai int32_t vcvtms_s32_f32(float32_t __p0) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vcvtms_s32_f32(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64_t vcvtmd_s64_f64(float64_t __p0) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vcvtmd_s64_f64(__p0);
- return __ret;
- }
- #else
- __ai int64_t vcvtmd_s64_f64(float64_t __p0) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vcvtmd_s64_f64(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32_t vcvtms_u32_f32(float32_t __p0) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vcvtms_u32_f32(__p0);
- return __ret;
- }
- #else
- __ai uint32_t vcvtms_u32_f32(float32_t __p0) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vcvtms_u32_f32(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64_t vcvtmd_u64_f64(float64_t __p0) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcvtmd_u64_f64(__p0);
- return __ret;
- }
- #else
- __ai uint64_t vcvtmd_u64_f64(float64_t __p0) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcvtmd_u64_f64(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32_t vcvtns_s32_f32(float32_t __p0) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vcvtns_s32_f32(__p0);
- return __ret;
- }
- #else
- __ai int32_t vcvtns_s32_f32(float32_t __p0) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vcvtns_s32_f32(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64_t vcvtnd_s64_f64(float64_t __p0) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vcvtnd_s64_f64(__p0);
- return __ret;
- }
- #else
- __ai int64_t vcvtnd_s64_f64(float64_t __p0) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vcvtnd_s64_f64(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32_t vcvtns_u32_f32(float32_t __p0) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vcvtns_u32_f32(__p0);
- return __ret;
- }
- #else
- __ai uint32_t vcvtns_u32_f32(float32_t __p0) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vcvtns_u32_f32(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64_t vcvtnd_u64_f64(float64_t __p0) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcvtnd_u64_f64(__p0);
- return __ret;
- }
- #else
- __ai uint64_t vcvtnd_u64_f64(float64_t __p0) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcvtnd_u64_f64(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32_t vcvtps_s32_f32(float32_t __p0) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vcvtps_s32_f32(__p0);
- return __ret;
- }
- #else
- __ai int32_t vcvtps_s32_f32(float32_t __p0) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vcvtps_s32_f32(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64_t vcvtpd_s64_f64(float64_t __p0) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vcvtpd_s64_f64(__p0);
- return __ret;
- }
- #else
- __ai int64_t vcvtpd_s64_f64(float64_t __p0) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vcvtpd_s64_f64(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32_t vcvtps_u32_f32(float32_t __p0) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vcvtps_u32_f32(__p0);
- return __ret;
- }
- #else
- __ai uint32_t vcvtps_u32_f32(float32_t __p0) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vcvtps_u32_f32(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64_t vcvtpd_u64_f64(float64_t __p0) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcvtpd_u64_f64(__p0);
- return __ret;
- }
- #else
- __ai uint64_t vcvtpd_u64_f64(float64_t __p0) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vcvtpd_u64_f64(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32_t vcvtxd_f32_f64(float64_t __p0) {
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vcvtxd_f32_f64(__p0);
- return __ret;
- }
- #else
- __ai float32_t vcvtxd_f32_f64(float64_t __p0) {
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vcvtxd_f32_f64(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42);
- return __ret;
- }
- #else
- __ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__rev0, 42);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai float32x2_t __noswap_vcvtx_f32_f64(float64x2_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
- float32x4_t __ret;
- __ret = vcombine_f32(__p0, vcvtx_f32_f64(__p1));
- return __ret;
- }
- #else
- __ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float32x4_t __ret;
- __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvtx_f32_f64(__rev1));
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __ret;
- __ret = __p0 / __p1;
- return __ret;
- }
- #else
- __ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float64x2_t __ret;
- __ret = __rev0 / __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __ret;
- __ret = __p0 / __p1;
- return __ret;
- }
- #else
- __ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = __rev0 / __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vdiv_f64(float64x1_t __p0, float64x1_t __p1) {
- float64x1_t __ret;
- __ret = __p0 / __p1;
- return __ret;
- }
- #else
- __ai float64x1_t vdiv_f64(float64x1_t __p0, float64x1_t __p1) {
- float64x1_t __ret;
- __ret = __p0 / __p1;
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __ret;
- __ret = __p0 / __p1;
- return __ret;
- }
- #else
- __ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float32x2_t __ret;
- __ret = __rev0 / __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupb_lane_p8(__p0, __p1) __extension__ ({ \
- poly8x8_t __s0 = __p0; \
- poly8_t __ret; \
- __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vdupb_lane_p8(__p0, __p1) __extension__ ({ \
- poly8x8_t __s0 = __p0; \
- poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly8_t __ret; \
- __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vduph_lane_p16(__p0, __p1) __extension__ ({ \
- poly16x4_t __s0 = __p0; \
- poly16_t __ret; \
- __ret = (poly16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vduph_lane_p16(__p0, __p1) __extension__ ({ \
- poly16x4_t __s0 = __p0; \
- poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- poly16_t __ret; \
- __ret = (poly16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__rev0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupb_lane_u8(__p0, __p1) __extension__ ({ \
- uint8x8_t __s0 = __p0; \
- uint8_t __ret; \
- __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vdupb_lane_u8(__p0, __p1) __extension__ ({ \
- uint8x8_t __s0 = __p0; \
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8_t __ret; \
- __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdups_lane_u32(__p0, __p1) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32_t __ret; \
- __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vdups_lane_u32(__p0, __p1) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint32_t __ret; \
- __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__rev0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupd_lane_u64(__p0, __p1) __extension__ ({ \
- uint64x1_t __s0 = __p0; \
- uint64_t __ret; \
- __ret = (uint64_t) __builtin_neon_vdupd_lane_i64((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vdupd_lane_u64(__p0, __p1) __extension__ ({ \
- uint64x1_t __s0 = __p0; \
- uint64_t __ret; \
- __ret = (uint64_t) __builtin_neon_vdupd_lane_i64((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vduph_lane_u16(__p0, __p1) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16_t __ret; \
- __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vduph_lane_u16(__p0, __p1) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint16_t __ret; \
- __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__rev0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupb_lane_s8(__p0, __p1) __extension__ ({ \
- int8x8_t __s0 = __p0; \
- int8_t __ret; \
- __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vdupb_lane_s8(__p0, __p1) __extension__ ({ \
- int8x8_t __s0 = __p0; \
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8_t __ret; \
- __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupd_lane_f64(__p0, __p1) __extension__ ({ \
- float64x1_t __s0 = __p0; \
- float64_t __ret; \
- __ret = (float64_t) __builtin_neon_vdupd_lane_f64((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vdupd_lane_f64(__p0, __p1) __extension__ ({ \
- float64x1_t __s0 = __p0; \
- float64_t __ret; \
- __ret = (float64_t) __builtin_neon_vdupd_lane_f64((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdups_lane_f32(__p0, __p1) __extension__ ({ \
- float32x2_t __s0 = __p0; \
- float32_t __ret; \
- __ret = (float32_t) __builtin_neon_vdups_lane_f32((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vdups_lane_f32(__p0, __p1) __extension__ ({ \
- float32x2_t __s0 = __p0; \
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- float32_t __ret; \
- __ret = (float32_t) __builtin_neon_vdups_lane_f32((int8x8_t)__rev0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdups_lane_s32(__p0, __p1) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32_t __ret; \
- __ret = (int32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vdups_lane_s32(__p0, __p1) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32_t __ret; \
- __ret = (int32_t) __builtin_neon_vdups_lane_i32((int8x8_t)__rev0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupd_lane_s64(__p0, __p1) __extension__ ({ \
- int64x1_t __s0 = __p0; \
- int64_t __ret; \
- __ret = (int64_t) __builtin_neon_vdupd_lane_i64((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vdupd_lane_s64(__p0, __p1) __extension__ ({ \
- int64x1_t __s0 = __p0; \
- int64_t __ret; \
- __ret = (int64_t) __builtin_neon_vdupd_lane_i64((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vduph_lane_s16(__p0, __p1) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16_t __ret; \
- __ret = (int16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vduph_lane_s16(__p0, __p1) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16_t __ret; \
- __ret = (int16_t) __builtin_neon_vduph_lane_i16((int8x8_t)__rev0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdup_lane_p64(__p0, __p1) __extension__ ({ \
- poly64x1_t __s0 = __p0; \
- poly64x1_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1); \
- __ret; \
- })
- #else
- #define vdup_lane_p64(__p0, __p1) __extension__ ({ \
- poly64x1_t __s0 = __p0; \
- poly64x1_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupq_lane_p64(__p0, __p1) __extension__ ({ \
- poly64x1_t __s0 = __p0; \
- poly64x2_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdupq_lane_p64(__p0, __p1) __extension__ ({ \
- poly64x1_t __s0 = __p0; \
- poly64x2_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupq_lane_f64(__p0, __p1) __extension__ ({ \
- float64x1_t __s0 = __p0; \
- float64x2_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdupq_lane_f64(__p0, __p1) __extension__ ({ \
- float64x1_t __s0 = __p0; \
- float64x2_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupq_lane_f16(__p0, __p1) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16x8_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdupq_lane_f16(__p0, __p1) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- float16x8_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdup_lane_f64(__p0, __p1) __extension__ ({ \
- float64x1_t __s0 = __p0; \
- float64x1_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1); \
- __ret; \
- })
- #else
- #define vdup_lane_f64(__p0, __p1) __extension__ ({ \
- float64x1_t __s0 = __p0; \
- float64x1_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdup_lane_f16(__p0, __p1) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16x4_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdup_lane_f16(__p0, __p1) __extension__ ({ \
- float16x4_t __s0 = __p0; \
- float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- float16x4_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \
- poly8x16_t __s0 = __p0; \
- poly8_t __ret; \
- __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \
- poly8x16_t __s0 = __p0; \
- poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly8_t __ret; \
- __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vduph_laneq_p16(__p0, __p1) __extension__ ({ \
- poly16x8_t __s0 = __p0; \
- poly16_t __ret; \
- __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vduph_laneq_p16(__p0, __p1) __extension__ ({ \
- poly16x8_t __s0 = __p0; \
- poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly16_t __ret; \
- __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__rev0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \
- uint8x16_t __s0 = __p0; \
- uint8_t __ret; \
- __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \
- uint8x16_t __s0 = __p0; \
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8_t __ret; \
- __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdups_laneq_u32(__p0, __p1) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32_t __ret; \
- __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vdups_laneq_u32(__p0, __p1) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint32_t __ret; \
- __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__rev0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint64_t __ret; \
- __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int8x16_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint64_t __ret; \
- __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int8x16_t)__rev0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vduph_laneq_u16(__p0, __p1) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16_t __ret; \
- __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vduph_laneq_u16(__p0, __p1) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16_t __ret; \
- __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__rev0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \
- int8x16_t __s0 = __p0; \
- int8_t __ret; \
- __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \
- int8x16_t __s0 = __p0; \
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8_t __ret; \
- __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \
- float64x2_t __s0 = __p0; \
- float64_t __ret; \
- __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((int8x16_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \
- float64x2_t __s0 = __p0; \
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- float64_t __ret; \
- __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((int8x16_t)__rev0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdups_laneq_f32(__p0, __p1) __extension__ ({ \
- float32x4_t __s0 = __p0; \
- float32_t __ret; \
- __ret = (float32_t) __builtin_neon_vdups_laneq_f32((int8x16_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vdups_laneq_f32(__p0, __p1) __extension__ ({ \
- float32x4_t __s0 = __p0; \
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- float32_t __ret; \
- __ret = (float32_t) __builtin_neon_vdups_laneq_f32((int8x16_t)__rev0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdups_laneq_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32_t __ret; \
- __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vdups_laneq_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int32_t __ret; \
- __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int8x16_t)__rev0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int64_t __ret; \
- __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int8x16_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int64_t __ret; \
- __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int8x16_t)__rev0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vduph_laneq_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16_t __ret; \
- __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vduph_laneq_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16_t __ret; \
- __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int8x16_t)__rev0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdup_laneq_p8(__p0, __p1) __extension__ ({ \
- poly8x16_t __s0 = __p0; \
- poly8x8_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdup_laneq_p8(__p0, __p1) __extension__ ({ \
- poly8x16_t __s0 = __p0; \
- poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly8x8_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdup_laneq_p64(__p0, __p1) __extension__ ({ \
- poly64x2_t __s0 = __p0; \
- poly64x1_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1); \
- __ret; \
- })
- #else
- #define vdup_laneq_p64(__p0, __p1) __extension__ ({ \
- poly64x2_t __s0 = __p0; \
- poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- poly64x1_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdup_laneq_p16(__p0, __p1) __extension__ ({ \
- poly16x8_t __s0 = __p0; \
- poly16x4_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdup_laneq_p16(__p0, __p1) __extension__ ({ \
- poly16x8_t __s0 = __p0; \
- poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly16x4_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupq_laneq_p8(__p0, __p1) __extension__ ({ \
- poly8x16_t __s0 = __p0; \
- poly8x16_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdupq_laneq_p8(__p0, __p1) __extension__ ({ \
- poly8x16_t __s0 = __p0; \
- poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly8x16_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupq_laneq_p64(__p0, __p1) __extension__ ({ \
- poly64x2_t __s0 = __p0; \
- poly64x2_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdupq_laneq_p64(__p0, __p1) __extension__ ({ \
- poly64x2_t __s0 = __p0; \
- poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- poly64x2_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupq_laneq_p16(__p0, __p1) __extension__ ({ \
- poly16x8_t __s0 = __p0; \
- poly16x8_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdupq_laneq_p16(__p0, __p1) __extension__ ({ \
- poly16x8_t __s0 = __p0; \
- poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly16x8_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupq_laneq_u8(__p0, __p1) __extension__ ({ \
- uint8x16_t __s0 = __p0; \
- uint8x16_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdupq_laneq_u8(__p0, __p1) __extension__ ({ \
- uint8x16_t __s0 = __p0; \
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x16_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupq_laneq_u32(__p0, __p1) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdupq_laneq_u32(__p0, __p1) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint32x4_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupq_laneq_u64(__p0, __p1) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint64x2_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdupq_laneq_u64(__p0, __p1) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint64x2_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupq_laneq_u16(__p0, __p1) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdupq_laneq_u16(__p0, __p1) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupq_laneq_s8(__p0, __p1) __extension__ ({ \
- int8x16_t __s0 = __p0; \
- int8x16_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdupq_laneq_s8(__p0, __p1) __extension__ ({ \
- int8x16_t __s0 = __p0; \
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x16_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupq_laneq_f64(__p0, __p1) __extension__ ({ \
- float64x2_t __s0 = __p0; \
- float64x2_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdupq_laneq_f64(__p0, __p1) __extension__ ({ \
- float64x2_t __s0 = __p0; \
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- float64x2_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupq_laneq_f32(__p0, __p1) __extension__ ({ \
- float32x4_t __s0 = __p0; \
- float32x4_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdupq_laneq_f32(__p0, __p1) __extension__ ({ \
- float32x4_t __s0 = __p0; \
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- float32x4_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupq_laneq_f16(__p0, __p1) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16x8_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdupq_laneq_f16(__p0, __p1) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x8_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupq_laneq_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdupq_laneq_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupq_laneq_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int64x2_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdupq_laneq_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int64x2_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdupq_laneq_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdupq_laneq_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdup_laneq_u8(__p0, __p1) __extension__ ({ \
- uint8x16_t __s0 = __p0; \
- uint8x8_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdup_laneq_u8(__p0, __p1) __extension__ ({ \
- uint8x16_t __s0 = __p0; \
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x8_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdup_laneq_u32(__p0, __p1) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x2_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdup_laneq_u32(__p0, __p1) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint32x2_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdup_laneq_u64(__p0, __p1) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint64x1_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1); \
- __ret; \
- })
- #else
- #define vdup_laneq_u64(__p0, __p1) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint64x1_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdup_laneq_u16(__p0, __p1) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x4_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdup_laneq_u16(__p0, __p1) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x4_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdup_laneq_s8(__p0, __p1) __extension__ ({ \
- int8x16_t __s0 = __p0; \
- int8x8_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdup_laneq_s8(__p0, __p1) __extension__ ({ \
- int8x16_t __s0 = __p0; \
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x8_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdup_laneq_f64(__p0, __p1) __extension__ ({ \
- float64x2_t __s0 = __p0; \
- float64x1_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1); \
- __ret; \
- })
- #else
- #define vdup_laneq_f64(__p0, __p1) __extension__ ({ \
- float64x2_t __s0 = __p0; \
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- float64x1_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdup_laneq_f32(__p0, __p1) __extension__ ({ \
- float32x4_t __s0 = __p0; \
- float32x2_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdup_laneq_f32(__p0, __p1) __extension__ ({ \
- float32x4_t __s0 = __p0; \
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- float32x2_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdup_laneq_f16(__p0, __p1) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16x4_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdup_laneq_f16(__p0, __p1) __extension__ ({ \
- float16x8_t __s0 = __p0; \
- float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x4_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdup_laneq_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x2_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdup_laneq_s32(__p0, __p1) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int32x2_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdup_laneq_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int64x1_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1); \
- __ret; \
- })
- #else
- #define vdup_laneq_s64(__p0, __p1) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int64x1_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vdup_laneq_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x4_t __ret; \
- __ret = __builtin_shufflevector(__s0, __s0, __p1, __p1, __p1, __p1); \
- __ret; \
- })
- #else
- #define vdup_laneq_s16(__p0, __p1) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x4_t __ret; \
- __ret = __builtin_shufflevector(__rev0, __rev0, __p1, __p1, __p1, __p1); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x1_t vdup_n_p64(poly64_t __p0) {
- poly64x1_t __ret;
- __ret = (poly64x1_t) {__p0};
- return __ret;
- }
- #else
- __ai poly64x1_t vdup_n_p64(poly64_t __p0) {
- poly64x1_t __ret;
- __ret = (poly64x1_t) {__p0};
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x2_t vdupq_n_p64(poly64_t __p0) {
- poly64x2_t __ret;
- __ret = (poly64x2_t) {__p0, __p0};
- return __ret;
- }
- #else
- __ai poly64x2_t vdupq_n_p64(poly64_t __p0) {
- poly64x2_t __ret;
- __ret = (poly64x2_t) {__p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vdupq_n_f64(float64_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t) {__p0, __p0};
- return __ret;
- }
- #else
- __ai float64x2_t vdupq_n_f64(float64_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t) {__p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vdup_n_f64(float64_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t) {__p0};
- return __ret;
- }
- #else
- __ai float64x1_t vdup_n_f64(float64_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t) {__p0};
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vext_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x1_t __s0 = __p0; \
- poly64x1_t __s1 = __p1; \
- poly64x1_t __ret; \
- __ret = (poly64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
- __ret; \
- })
- #else
- #define vext_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x1_t __s0 = __p0; \
- poly64x1_t __s1 = __p1; \
- poly64x1_t __ret; \
- __ret = (poly64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vextq_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x2_t __s0 = __p0; \
- poly64x2_t __s1 = __p1; \
- poly64x2_t __ret; \
- __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \
- __ret; \
- })
- #else
- #define vextq_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x2_t __s0 = __p0; \
- poly64x2_t __s1 = __p1; \
- poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- poly64x2_t __ret; \
- __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vextq_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x2_t __s0 = __p0; \
- float64x2_t __s1 = __p1; \
- float64x2_t __ret; \
- __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 42); \
- __ret; \
- })
- #else
- #define vextq_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x2_t __s0 = __p0; \
- float64x2_t __s1 = __p1; \
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- float64x2_t __ret; \
- __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 42); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vext_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x1_t __s0 = __p0; \
- float64x1_t __s1 = __p1; \
- float64x1_t __ret; \
- __ret = (float64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \
- __ret; \
- })
- #else
- #define vext_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x1_t __s0 = __p0; \
- float64x1_t __s1 = __p1; \
- float64x1_t __ret; \
- __ret = (float64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
- return __ret;
- }
- #else
- __ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai float64x2_t __noswap_vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vfma_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
- return __ret;
- }
- #else
- __ai float64x1_t vfma_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
- return __ret;
- }
- __ai float64x1_t __noswap_vfma_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vfmad_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
- float64_t __s0 = __p0; \
- float64_t __s1 = __p1; \
- float64x1_t __s2 = __p2; \
- float64_t __ret; \
- __ret = (float64_t) __builtin_neon_vfmad_lane_f64(__s0, __s1, (int8x8_t)__s2, __p3); \
- __ret; \
- })
- #else
- #define vfmad_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
- float64_t __s0 = __p0; \
- float64_t __s1 = __p1; \
- float64x1_t __s2 = __p2; \
- float64_t __ret; \
- __ret = (float64_t) __builtin_neon_vfmad_lane_f64(__s0, __s1, (int8x8_t)__s2, __p3); \
- __ret; \
- })
- #define __noswap_vfmad_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
- float64_t __s0 = __p0; \
- float64_t __s1 = __p1; \
- float64x1_t __s2 = __p2; \
- float64_t __ret; \
- __ret = (float64_t) __builtin_neon_vfmad_lane_f64(__s0, __s1, (int8x8_t)__s2, __p3); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
- float32_t __s0 = __p0; \
- float32_t __s1 = __p1; \
- float32x2_t __s2 = __p2; \
- float32_t __ret; \
- __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (int8x8_t)__s2, __p3); \
- __ret; \
- })
- #else
- #define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
- float32_t __s0 = __p0; \
- float32_t __s1 = __p1; \
- float32x2_t __s2 = __p2; \
- float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
- float32_t __ret; \
- __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (int8x8_t)__rev2, __p3); \
- __ret; \
- })
- #define __noswap_vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
- float32_t __s0 = __p0; \
- float32_t __s1 = __p1; \
- float32x2_t __s2 = __p2; \
- float32_t __ret; \
- __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (int8x8_t)__s2, __p3); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
- float64x2_t __s0 = __p0; \
- float64x2_t __s1 = __p1; \
- float64x1_t __s2 = __p2; \
- float64x2_t __ret; \
- __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \
- __ret; \
- })
- #else
- #define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
- float64x2_t __s0 = __p0; \
- float64x2_t __s1 = __p1; \
- float64x1_t __s2 = __p2; \
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- float64x2_t __ret; \
- __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__s2, __p3, 42); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #define __noswap_vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
- float64x2_t __s0 = __p0; \
- float64x2_t __s1 = __p1; \
- float64x1_t __s2 = __p2; \
- float64x2_t __ret; \
- __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
- float32x4_t __s0 = __p0; \
- float32x4_t __s1 = __p1; \
- float32x2_t __s2 = __p2; \
- float32x4_t __ret; \
- __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \
- __ret; \
- })
- #else
- #define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
- float32x4_t __s0 = __p0; \
- float32x4_t __s1 = __p1; \
- float32x2_t __s2 = __p2; \
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
- float32x4_t __ret; \
- __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 41); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
- float32x4_t __s0 = __p0; \
- float32x4_t __s1 = __p1; \
- float32x2_t __s2 = __p2; \
- float32x4_t __ret; \
- __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vfma_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
- float64x1_t __s0 = __p0; \
- float64x1_t __s1 = __p1; \
- float64x1_t __s2 = __p2; \
- float64x1_t __ret; \
- __ret = (float64x1_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 10); \
- __ret; \
- })
- #else
- #define vfma_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
- float64x1_t __s0 = __p0; \
- float64x1_t __s1 = __p1; \
- float64x1_t __s2 = __p2; \
- float64x1_t __ret; \
- __ret = (float64x1_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 10); \
- __ret; \
- })
- #define __noswap_vfma_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
- float64x1_t __s0 = __p0; \
- float64x1_t __s1 = __p1; \
- float64x1_t __s2 = __p2; \
- float64x1_t __ret; \
- __ret = (float64x1_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 10); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
- float32x2_t __s0 = __p0; \
- float32x2_t __s1 = __p1; \
- float32x2_t __s2 = __p2; \
- float32x2_t __ret; \
- __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \
- __ret; \
- })
- #else
- #define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
- float32x2_t __s0 = __p0; \
- float32x2_t __s1 = __p1; \
- float32x2_t __s2 = __p2; \
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
- float32x2_t __ret; \
- __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 9); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #define __noswap_vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
- float32x2_t __s0 = __p0; \
- float32x2_t __s1 = __p1; \
- float32x2_t __s2 = __p2; \
- float32x2_t __ret; \
- __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
- float64_t __s0 = __p0; \
- float64_t __s1 = __p1; \
- float64x2_t __s2 = __p2; \
- float64_t __ret; \
- __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (int8x16_t)__s2, __p3); \
- __ret; \
- })
- #else
- #define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
- float64_t __s0 = __p0; \
- float64_t __s1 = __p1; \
- float64x2_t __s2 = __p2; \
- float64x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
- float64_t __ret; \
- __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (int8x16_t)__rev2, __p3); \
- __ret; \
- })
- #define __noswap_vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
- float64_t __s0 = __p0; \
- float64_t __s1 = __p1; \
- float64x2_t __s2 = __p2; \
- float64_t __ret; \
- __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (int8x16_t)__s2, __p3); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
- float32_t __s0 = __p0; \
- float32_t __s1 = __p1; \
- float32x4_t __s2 = __p2; \
- float32_t __ret; \
- __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (int8x16_t)__s2, __p3); \
- __ret; \
- })
- #else
- #define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
- float32_t __s0 = __p0; \
- float32_t __s1 = __p1; \
- float32x4_t __s2 = __p2; \
- float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- float32_t __ret; \
- __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (int8x16_t)__rev2, __p3); \
- __ret; \
- })
- #define __noswap_vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
- float32_t __s0 = __p0; \
- float32_t __s1 = __p1; \
- float32x4_t __s2 = __p2; \
- float32_t __ret; \
- __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (int8x16_t)__s2, __p3); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
- float64x2_t __s0 = __p0; \
- float64x2_t __s1 = __p1; \
- float64x2_t __s2 = __p2; \
- float64x2_t __ret; \
- __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \
- __ret; \
- })
- #else
- #define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
- float64x2_t __s0 = __p0; \
- float64x2_t __s1 = __p1; \
- float64x2_t __s2 = __p2; \
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- float64x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
- float64x2_t __ret; \
- __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 42); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #define __noswap_vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
- float64x2_t __s0 = __p0; \
- float64x2_t __s1 = __p1; \
- float64x2_t __s2 = __p2; \
- float64x2_t __ret; \
- __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
- float32x4_t __s0 = __p0; \
- float32x4_t __s1 = __p1; \
- float32x4_t __s2 = __p2; \
- float32x4_t __ret; \
- __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \
- __ret; \
- })
- #else
- #define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
- float32x4_t __s0 = __p0; \
- float32x4_t __s1 = __p1; \
- float32x4_t __s2 = __p2; \
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- float32x4_t __ret; \
- __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 41); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #define __noswap_vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
- float32x4_t __s0 = __p0; \
- float32x4_t __s1 = __p1; \
- float32x4_t __s2 = __p2; \
- float32x4_t __ret; \
- __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
- float64x1_t __s0 = __p0; \
- float64x1_t __s1 = __p1; \
- float64x2_t __s2 = __p2; \
- float64x1_t __ret; \
- __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \
- __ret; \
- })
- #else
- #define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
- float64x1_t __s0 = __p0; \
- float64x1_t __s1 = __p1; \
- float64x2_t __s2 = __p2; \
- float64x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
- float64x1_t __ret; \
- __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__rev2, __p3, 10); \
- __ret; \
- })
- #define __noswap_vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
- float64x1_t __s0 = __p0; \
- float64x1_t __s1 = __p1; \
- float64x2_t __s2 = __p2; \
- float64x1_t __ret; \
- __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
- float32x2_t __s0 = __p0; \
- float32x2_t __s1 = __p1; \
- float32x4_t __s2 = __p2; \
- float32x2_t __ret; \
- __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \
- __ret; \
- })
- #else
- #define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
- float32x2_t __s0 = __p0; \
- float32x2_t __s1 = __p1; \
- float32x4_t __s2 = __p2; \
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- float32x2_t __ret; \
- __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 9); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #define __noswap_vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
- float32x2_t __s0 = __p0; \
- float32x2_t __s1 = __p1; \
- float32x4_t __s2 = __p2; \
- float32x2_t __ret; \
- __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \
- __ret; \
- })
- #endif
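- /* Editor-added illustration, not part of the generated header: the *_lane / *_laneq
-    variants above broadcast one constant-indexed lane of the third operand before the
-    fused multiply-add. A minimal sketch, assuming an AArch64 target: */
- static inline float32x4_t fma_lane_example(float32x4_t acc, float32x4_t x, float32x4_t coeffs) {
-   /* each lane computes acc[i] + x[i] * coeffs[2]; the lane index must be a compile-time constant */
-   return vfmaq_laneq_f32(acc, x, coeffs, 2);
- }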
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
- float64x2_t __ret;
- __ret = vfmaq_f64(__p0, __p1, (float64x2_t) {__p2, __p2});
- return __ret;
- }
- #else
- __ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float64x2_t __ret;
- __ret = __noswap_vfmaq_f64(__rev0, __rev1, (float64x2_t) {__p2, __p2});
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
- float32x4_t __ret;
- __ret = vfmaq_f32(__p0, __p1, (float32x4_t) {__p2, __p2, __p2, __p2});
- return __ret;
- }
- #else
- __ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = __noswap_vfmaq_f32(__rev0, __rev1, (float32x4_t) {__p2, __p2, __p2, __p2});
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
- float32x2_t __ret;
- __ret = vfma_f32(__p0, __p1, (float32x2_t) {__p2, __p2});
- return __ret;
- }
- #else
- __ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float32x2_t __ret;
- __ret = __noswap_vfma_f32(__rev0, __rev1, (float32x2_t) {__p2, __p2});
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
- float64x2_t __ret;
- __ret = vfmaq_f64(__p0, -__p1, __p2);
- return __ret;
- }
- #else
- __ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
- float64x2_t __ret;
- __ret = __noswap_vfmaq_f64(__rev0, -__rev1, __rev2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vfms_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
- float64x1_t __ret;
- __ret = vfma_f64(__p0, -__p1, __p2);
- return __ret;
- }
- #else
- __ai float64x1_t vfms_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
- float64x1_t __ret;
- __ret = __noswap_vfma_f64(__p0, -__p1, __p2);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vfmsd_lane_f64(__p0_100, __p1_100, __p2_100, __p3_100) __extension__ ({ \
- float64_t __s0_100 = __p0_100; \
- float64_t __s1_100 = __p1_100; \
- float64x1_t __s2_100 = __p2_100; \
- float64_t __ret_100; \
- __ret_100 = vfmad_lane_f64(__s0_100, -__s1_100, __s2_100, __p3_100); \
- __ret_100; \
- })
- #else
- #define vfmsd_lane_f64(__p0_101, __p1_101, __p2_101, __p3_101) __extension__ ({ \
- float64_t __s0_101 = __p0_101; \
- float64_t __s1_101 = __p1_101; \
- float64x1_t __s2_101 = __p2_101; \
- float64_t __ret_101; \
- __ret_101 = __noswap_vfmad_lane_f64(__s0_101, -__s1_101, __s2_101, __p3_101); \
- __ret_101; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vfmss_lane_f32(__p0_102, __p1_102, __p2_102, __p3_102) __extension__ ({ \
- float32_t __s0_102 = __p0_102; \
- float32_t __s1_102 = __p1_102; \
- float32x2_t __s2_102 = __p2_102; \
- float32_t __ret_102; \
- __ret_102 = vfmas_lane_f32(__s0_102, -__s1_102, __s2_102, __p3_102); \
- __ret_102; \
- })
- #else
- #define vfmss_lane_f32(__p0_103, __p1_103, __p2_103, __p3_103) __extension__ ({ \
- float32_t __s0_103 = __p0_103; \
- float32_t __s1_103 = __p1_103; \
- float32x2_t __s2_103 = __p2_103; \
- float32x2_t __rev2_103; __rev2_103 = __builtin_shufflevector(__s2_103, __s2_103, 1, 0); \
- float32_t __ret_103; \
- __ret_103 = __noswap_vfmas_lane_f32(__s0_103, -__s1_103, __rev2_103, __p3_103); \
- __ret_103; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vfmsq_lane_f64(__p0_104, __p1_104, __p2_104, __p3_104) __extension__ ({ \
- float64x2_t __s0_104 = __p0_104; \
- float64x2_t __s1_104 = __p1_104; \
- float64x1_t __s2_104 = __p2_104; \
- float64x2_t __ret_104; \
- __ret_104 = vfmaq_lane_f64(__s0_104, -__s1_104, __s2_104, __p3_104); \
- __ret_104; \
- })
- #else
- #define vfmsq_lane_f64(__p0_105, __p1_105, __p2_105, __p3_105) __extension__ ({ \
- float64x2_t __s0_105 = __p0_105; \
- float64x2_t __s1_105 = __p1_105; \
- float64x1_t __s2_105 = __p2_105; \
- float64x2_t __rev0_105; __rev0_105 = __builtin_shufflevector(__s0_105, __s0_105, 1, 0); \
- float64x2_t __rev1_105; __rev1_105 = __builtin_shufflevector(__s1_105, __s1_105, 1, 0); \
- float64x2_t __ret_105; \
- __ret_105 = __noswap_vfmaq_lane_f64(__rev0_105, -__rev1_105, __s2_105, __p3_105); \
- __ret_105 = __builtin_shufflevector(__ret_105, __ret_105, 1, 0); \
- __ret_105; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vfmsq_lane_f32(__p0_106, __p1_106, __p2_106, __p3_106) __extension__ ({ \
- float32x4_t __s0_106 = __p0_106; \
- float32x4_t __s1_106 = __p1_106; \
- float32x2_t __s2_106 = __p2_106; \
- float32x4_t __ret_106; \
- __ret_106 = vfmaq_lane_f32(__s0_106, -__s1_106, __s2_106, __p3_106); \
- __ret_106; \
- })
- #else
- #define vfmsq_lane_f32(__p0_107, __p1_107, __p2_107, __p3_107) __extension__ ({ \
- float32x4_t __s0_107 = __p0_107; \
- float32x4_t __s1_107 = __p1_107; \
- float32x2_t __s2_107 = __p2_107; \
- float32x4_t __rev0_107; __rev0_107 = __builtin_shufflevector(__s0_107, __s0_107, 3, 2, 1, 0); \
- float32x4_t __rev1_107; __rev1_107 = __builtin_shufflevector(__s1_107, __s1_107, 3, 2, 1, 0); \
- float32x2_t __rev2_107; __rev2_107 = __builtin_shufflevector(__s2_107, __s2_107, 1, 0); \
- float32x4_t __ret_107; \
- __ret_107 = __noswap_vfmaq_lane_f32(__rev0_107, -__rev1_107, __rev2_107, __p3_107); \
- __ret_107 = __builtin_shufflevector(__ret_107, __ret_107, 3, 2, 1, 0); \
- __ret_107; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vfms_lane_f64(__p0_108, __p1_108, __p2_108, __p3_108) __extension__ ({ \
- float64x1_t __s0_108 = __p0_108; \
- float64x1_t __s1_108 = __p1_108; \
- float64x1_t __s2_108 = __p2_108; \
- float64x1_t __ret_108; \
- __ret_108 = vfma_lane_f64(__s0_108, -__s1_108, __s2_108, __p3_108); \
- __ret_108; \
- })
- #else
- #define vfms_lane_f64(__p0_109, __p1_109, __p2_109, __p3_109) __extension__ ({ \
- float64x1_t __s0_109 = __p0_109; \
- float64x1_t __s1_109 = __p1_109; \
- float64x1_t __s2_109 = __p2_109; \
- float64x1_t __ret_109; \
- __ret_109 = __noswap_vfma_lane_f64(__s0_109, -__s1_109, __s2_109, __p3_109); \
- __ret_109; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vfms_lane_f32(__p0_110, __p1_110, __p2_110, __p3_110) __extension__ ({ \
- float32x2_t __s0_110 = __p0_110; \
- float32x2_t __s1_110 = __p1_110; \
- float32x2_t __s2_110 = __p2_110; \
- float32x2_t __ret_110; \
- __ret_110 = vfma_lane_f32(__s0_110, -__s1_110, __s2_110, __p3_110); \
- __ret_110; \
- })
- #else
- #define vfms_lane_f32(__p0_111, __p1_111, __p2_111, __p3_111) __extension__ ({ \
- float32x2_t __s0_111 = __p0_111; \
- float32x2_t __s1_111 = __p1_111; \
- float32x2_t __s2_111 = __p2_111; \
- float32x2_t __rev0_111; __rev0_111 = __builtin_shufflevector(__s0_111, __s0_111, 1, 0); \
- float32x2_t __rev1_111; __rev1_111 = __builtin_shufflevector(__s1_111, __s1_111, 1, 0); \
- float32x2_t __rev2_111; __rev2_111 = __builtin_shufflevector(__s2_111, __s2_111, 1, 0); \
- float32x2_t __ret_111; \
- __ret_111 = __noswap_vfma_lane_f32(__rev0_111, -__rev1_111, __rev2_111, __p3_111); \
- __ret_111 = __builtin_shufflevector(__ret_111, __ret_111, 1, 0); \
- __ret_111; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vfmsd_laneq_f64(__p0_112, __p1_112, __p2_112, __p3_112) __extension__ ({ \
- float64_t __s0_112 = __p0_112; \
- float64_t __s1_112 = __p1_112; \
- float64x2_t __s2_112 = __p2_112; \
- float64_t __ret_112; \
- __ret_112 = vfmad_laneq_f64(__s0_112, -__s1_112, __s2_112, __p3_112); \
- __ret_112; \
- })
- #else
- #define vfmsd_laneq_f64(__p0_113, __p1_113, __p2_113, __p3_113) __extension__ ({ \
- float64_t __s0_113 = __p0_113; \
- float64_t __s1_113 = __p1_113; \
- float64x2_t __s2_113 = __p2_113; \
- float64x2_t __rev2_113; __rev2_113 = __builtin_shufflevector(__s2_113, __s2_113, 1, 0); \
- float64_t __ret_113; \
- __ret_113 = __noswap_vfmad_laneq_f64(__s0_113, -__s1_113, __rev2_113, __p3_113); \
- __ret_113; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vfmss_laneq_f32(__p0_114, __p1_114, __p2_114, __p3_114) __extension__ ({ \
- float32_t __s0_114 = __p0_114; \
- float32_t __s1_114 = __p1_114; \
- float32x4_t __s2_114 = __p2_114; \
- float32_t __ret_114; \
- __ret_114 = vfmas_laneq_f32(__s0_114, -__s1_114, __s2_114, __p3_114); \
- __ret_114; \
- })
- #else
- #define vfmss_laneq_f32(__p0_115, __p1_115, __p2_115, __p3_115) __extension__ ({ \
- float32_t __s0_115 = __p0_115; \
- float32_t __s1_115 = __p1_115; \
- float32x4_t __s2_115 = __p2_115; \
- float32x4_t __rev2_115; __rev2_115 = __builtin_shufflevector(__s2_115, __s2_115, 3, 2, 1, 0); \
- float32_t __ret_115; \
- __ret_115 = __noswap_vfmas_laneq_f32(__s0_115, -__s1_115, __rev2_115, __p3_115); \
- __ret_115; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vfmsq_laneq_f64(__p0_116, __p1_116, __p2_116, __p3_116) __extension__ ({ \
- float64x2_t __s0_116 = __p0_116; \
- float64x2_t __s1_116 = __p1_116; \
- float64x2_t __s2_116 = __p2_116; \
- float64x2_t __ret_116; \
- __ret_116 = vfmaq_laneq_f64(__s0_116, -__s1_116, __s2_116, __p3_116); \
- __ret_116; \
- })
- #else
- #define vfmsq_laneq_f64(__p0_117, __p1_117, __p2_117, __p3_117) __extension__ ({ \
- float64x2_t __s0_117 = __p0_117; \
- float64x2_t __s1_117 = __p1_117; \
- float64x2_t __s2_117 = __p2_117; \
- float64x2_t __rev0_117; __rev0_117 = __builtin_shufflevector(__s0_117, __s0_117, 1, 0); \
- float64x2_t __rev1_117; __rev1_117 = __builtin_shufflevector(__s1_117, __s1_117, 1, 0); \
- float64x2_t __rev2_117; __rev2_117 = __builtin_shufflevector(__s2_117, __s2_117, 1, 0); \
- float64x2_t __ret_117; \
- __ret_117 = __noswap_vfmaq_laneq_f64(__rev0_117, -__rev1_117, __rev2_117, __p3_117); \
- __ret_117 = __builtin_shufflevector(__ret_117, __ret_117, 1, 0); \
- __ret_117; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vfmsq_laneq_f32(__p0_118, __p1_118, __p2_118, __p3_118) __extension__ ({ \
- float32x4_t __s0_118 = __p0_118; \
- float32x4_t __s1_118 = __p1_118; \
- float32x4_t __s2_118 = __p2_118; \
- float32x4_t __ret_118; \
- __ret_118 = vfmaq_laneq_f32(__s0_118, -__s1_118, __s2_118, __p3_118); \
- __ret_118; \
- })
- #else
- #define vfmsq_laneq_f32(__p0_119, __p1_119, __p2_119, __p3_119) __extension__ ({ \
- float32x4_t __s0_119 = __p0_119; \
- float32x4_t __s1_119 = __p1_119; \
- float32x4_t __s2_119 = __p2_119; \
- float32x4_t __rev0_119; __rev0_119 = __builtin_shufflevector(__s0_119, __s0_119, 3, 2, 1, 0); \
- float32x4_t __rev1_119; __rev1_119 = __builtin_shufflevector(__s1_119, __s1_119, 3, 2, 1, 0); \
- float32x4_t __rev2_119; __rev2_119 = __builtin_shufflevector(__s2_119, __s2_119, 3, 2, 1, 0); \
- float32x4_t __ret_119; \
- __ret_119 = __noswap_vfmaq_laneq_f32(__rev0_119, -__rev1_119, __rev2_119, __p3_119); \
- __ret_119 = __builtin_shufflevector(__ret_119, __ret_119, 3, 2, 1, 0); \
- __ret_119; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vfms_laneq_f64(__p0_120, __p1_120, __p2_120, __p3_120) __extension__ ({ \
- float64x1_t __s0_120 = __p0_120; \
- float64x1_t __s1_120 = __p1_120; \
- float64x2_t __s2_120 = __p2_120; \
- float64x1_t __ret_120; \
- __ret_120 = vfma_laneq_f64(__s0_120, -__s1_120, __s2_120, __p3_120); \
- __ret_120; \
- })
- #else
- #define vfms_laneq_f64(__p0_121, __p1_121, __p2_121, __p3_121) __extension__ ({ \
- float64x1_t __s0_121 = __p0_121; \
- float64x1_t __s1_121 = __p1_121; \
- float64x2_t __s2_121 = __p2_121; \
- float64x2_t __rev2_121; __rev2_121 = __builtin_shufflevector(__s2_121, __s2_121, 1, 0); \
- float64x1_t __ret_121; \
- __ret_121 = __noswap_vfma_laneq_f64(__s0_121, -__s1_121, __rev2_121, __p3_121); \
- __ret_121; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vfms_laneq_f32(__p0_122, __p1_122, __p2_122, __p3_122) __extension__ ({ \
- float32x2_t __s0_122 = __p0_122; \
- float32x2_t __s1_122 = __p1_122; \
- float32x4_t __s2_122 = __p2_122; \
- float32x2_t __ret_122; \
- __ret_122 = vfma_laneq_f32(__s0_122, -__s1_122, __s2_122, __p3_122); \
- __ret_122; \
- })
- #else
- #define vfms_laneq_f32(__p0_123, __p1_123, __p2_123, __p3_123) __extension__ ({ \
- float32x2_t __s0_123 = __p0_123; \
- float32x2_t __s1_123 = __p1_123; \
- float32x4_t __s2_123 = __p2_123; \
- float32x2_t __rev0_123; __rev0_123 = __builtin_shufflevector(__s0_123, __s0_123, 1, 0); \
- float32x2_t __rev1_123; __rev1_123 = __builtin_shufflevector(__s1_123, __s1_123, 1, 0); \
- float32x4_t __rev2_123; __rev2_123 = __builtin_shufflevector(__s2_123, __s2_123, 3, 2, 1, 0); \
- float32x2_t __ret_123; \
- __ret_123 = __noswap_vfma_laneq_f32(__rev0_123, -__rev1_123, __rev2_123, __p3_123); \
- __ret_123 = __builtin_shufflevector(__ret_123, __ret_123, 1, 0); \
- __ret_123; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
- float64x2_t __ret;
- __ret = vfmaq_f64(__p0, -__p1, (float64x2_t) {__p2, __p2});
- return __ret;
- }
- #else
- __ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float64x2_t __ret;
- __ret = __noswap_vfmaq_f64(__rev0, -__rev1, (float64x2_t) {__p2, __p2});
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
- float32x4_t __ret;
- __ret = vfmaq_f32(__p0, -__p1, (float32x4_t) {__p2, __p2, __p2, __p2});
- return __ret;
- }
- #else
- __ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = __noswap_vfmaq_f32(__rev0, -__rev1, (float32x4_t) {__p2, __p2, __p2, __p2});
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
- float32x2_t __ret;
- __ret = vfma_f32(__p0, -__p1, (float32x2_t) {__p2, __p2});
- return __ret;
- }
- #else
- __ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float32x2_t __ret;
- __ret = __noswap_vfma_f32(__rev0, -__rev1, (float32x2_t) {__p2, __p2});
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
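- /* Editor-added illustration, not part of the generated header: the vfms* forms above
-    are expressed as vfma* with the second operand negated, so vfmsq_f64(a, b, c) yields
-    a[i] - b[i] * c[i] with a single rounding. A minimal sketch, assuming an AArch64
-    target; vdupq_n_f64 is assumed from elsewhere in this header: */
- static inline float64x2_t fms_example(void) {
-   float64x2_t a = vdupq_n_f64(10.0);
-   float64x2_t b = vdupq_n_f64(3.0);
-   float64x2_t c = vdupq_n_f64(2.0);
-   return vfmsq_f64(a, b, c);  /* both lanes: 10 - 3*2 = 4 */
- }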
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x1_t vget_high_p64(poly64x2_t __p0) {
- poly64x1_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 1);
- return __ret;
- }
- #else
- __ai poly64x1_t vget_high_p64(poly64x2_t __p0) {
- poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- poly64x1_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 1);
- return __ret;
- }
- __ai poly64x1_t __noswap_vget_high_p64(poly64x2_t __p0) {
- poly64x1_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vget_high_f64(float64x2_t __p0) {
- float64x1_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 1);
- return __ret;
- }
- #else
- __ai float64x1_t vget_high_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x1_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vget_lane_p64(__p0, __p1) __extension__ ({ \
- poly64x1_t __s0 = __p0; \
- poly64_t __ret; \
- __ret = (poly64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vget_lane_p64(__p0, __p1) __extension__ ({ \
- poly64x1_t __s0 = __p0; \
- poly64_t __ret; \
- __ret = (poly64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #define __noswap_vget_lane_p64(__p0, __p1) __extension__ ({ \
- poly64x1_t __s0 = __p0; \
- poly64_t __ret; \
- __ret = (poly64_t) __builtin_neon_vget_lane_i64((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vgetq_lane_p64(__p0, __p1) __extension__ ({ \
- poly64x2_t __s0 = __p0; \
- poly64_t __ret; \
- __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vgetq_lane_p64(__p0, __p1) __extension__ ({ \
- poly64x2_t __s0 = __p0; \
- poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- poly64_t __ret; \
- __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__rev0, __p1); \
- __ret; \
- })
- #define __noswap_vgetq_lane_p64(__p0, __p1) __extension__ ({ \
- poly64x2_t __s0 = __p0; \
- poly64_t __ret; \
- __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((int8x16_t)__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vgetq_lane_f64(__p0, __p1) __extension__ ({ \
- float64x2_t __s0 = __p0; \
- float64_t __ret; \
- __ret = (float64_t) __builtin_neon_vgetq_lane_f64((int8x16_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vgetq_lane_f64(__p0, __p1) __extension__ ({ \
- float64x2_t __s0 = __p0; \
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- float64_t __ret; \
- __ret = (float64_t) __builtin_neon_vgetq_lane_f64((int8x16_t)__rev0, __p1); \
- __ret; \
- })
- #define __noswap_vgetq_lane_f64(__p0, __p1) __extension__ ({ \
- float64x2_t __s0 = __p0; \
- float64_t __ret; \
- __ret = (float64_t) __builtin_neon_vgetq_lane_f64((int8x16_t)__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vget_lane_f64(__p0, __p1) __extension__ ({ \
- float64x1_t __s0 = __p0; \
- float64_t __ret; \
- __ret = (float64_t) __builtin_neon_vget_lane_f64((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #else
- #define vget_lane_f64(__p0, __p1) __extension__ ({ \
- float64x1_t __s0 = __p0; \
- float64_t __ret; \
- __ret = (float64_t) __builtin_neon_vget_lane_f64((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #define __noswap_vget_lane_f64(__p0, __p1) __extension__ ({ \
- float64x1_t __s0 = __p0; \
- float64_t __ret; \
- __ret = (float64_t) __builtin_neon_vget_lane_f64((int8x8_t)__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x1_t vget_low_p64(poly64x2_t __p0) {
- poly64x1_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 0);
- return __ret;
- }
- #else
- __ai poly64x1_t vget_low_p64(poly64x2_t __p0) {
- poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- poly64x1_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vget_low_f64(float64x2_t __p0) {
- float64x1_t __ret;
- __ret = __builtin_shufflevector(__p0, __p0, 0);
- return __ret;
- }
- #else
- __ai float64x1_t vget_low_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x1_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev0, 0);
- return __ret;
- }
- #endif
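- /* Editor-added illustration, not part of the generated header: vget_low_f64 and
-    vget_high_f64 above split a 128-bit float64x2_t into its two 64-bit halves, and
-    vget_lane_f64 extracts the scalar. A minimal sketch, assuming an AArch64 target: */
- static inline float64_t sum_halves(float64x2_t v) {
-   float64x1_t lo = vget_low_f64(v);   /* lane 0 */
-   float64x1_t hi = vget_high_f64(v);  /* lane 1 */
-   return vget_lane_f64(lo, 0) + vget_lane_f64(hi, 0);
- }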
- #ifdef __LITTLE_ENDIAN__
- #define vld1_p64(__p0) __extension__ ({ \
- poly64x1_t __ret; \
- __ret = (poly64x1_t) __builtin_neon_vld1_v(__p0, 6); \
- __ret; \
- })
- #else
- #define vld1_p64(__p0) __extension__ ({ \
- poly64x1_t __ret; \
- __ret = (poly64x1_t) __builtin_neon_vld1_v(__p0, 6); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_p64(__p0) __extension__ ({ \
- poly64x2_t __ret; \
- __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \
- __ret; \
- })
- #else
- #define vld1q_p64(__p0) __extension__ ({ \
- poly64x2_t __ret; \
- __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_f64(__p0) __extension__ ({ \
- float64x2_t __ret; \
- __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \
- __ret; \
- })
- #else
- #define vld1q_f64(__p0) __extension__ ({ \
- float64x2_t __ret; \
- __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_f64(__p0) __extension__ ({ \
- float64x1_t __ret; \
- __ret = (float64x1_t) __builtin_neon_vld1_v(__p0, 10); \
- __ret; \
- })
- #else
- #define vld1_f64(__p0) __extension__ ({ \
- float64x1_t __ret; \
- __ret = (float64x1_t) __builtin_neon_vld1_v(__p0, 10); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_dup_p64(__p0) __extension__ ({ \
- poly64x1_t __ret; \
- __ret = (poly64x1_t) __builtin_neon_vld1_dup_v(__p0, 6); \
- __ret; \
- })
- #else
- #define vld1_dup_p64(__p0) __extension__ ({ \
- poly64x1_t __ret; \
- __ret = (poly64x1_t) __builtin_neon_vld1_dup_v(__p0, 6); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_dup_p64(__p0) __extension__ ({ \
- poly64x2_t __ret; \
- __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \
- __ret; \
- })
- #else
- #define vld1q_dup_p64(__p0) __extension__ ({ \
- poly64x2_t __ret; \
- __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_dup_f64(__p0) __extension__ ({ \
- float64x2_t __ret; \
- __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \
- __ret; \
- })
- #else
- #define vld1q_dup_f64(__p0) __extension__ ({ \
- float64x2_t __ret; \
- __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_dup_f64(__p0) __extension__ ({ \
- float64x1_t __ret; \
- __ret = (float64x1_t) __builtin_neon_vld1_dup_v(__p0, 10); \
- __ret; \
- })
- #else
- #define vld1_dup_f64(__p0) __extension__ ({ \
- float64x1_t __ret; \
- __ret = (float64x1_t) __builtin_neon_vld1_dup_v(__p0, 10); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x1_t __s1 = __p1; \
- poly64x1_t __ret; \
- __ret = (poly64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \
- __ret; \
- })
- #else
- #define vld1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x1_t __s1 = __p1; \
- poly64x1_t __ret; \
- __ret = (poly64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x2_t __s1 = __p1; \
- poly64x2_t __ret; \
- __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \
- __ret; \
- })
- #else
- #define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x2_t __s1 = __p1; \
- poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- poly64x2_t __ret; \
- __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x2_t __s1 = __p1; \
- float64x2_t __ret; \
- __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \
- __ret; \
- })
- #else
- #define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x2_t __s1 = __p1; \
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- float64x2_t __ret; \
- __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x1_t __s1 = __p1; \
- float64x1_t __ret; \
- __ret = (float64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \
- __ret; \
- })
- #else
- #define vld1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x1_t __s1 = __p1; \
- float64x1_t __ret; \
- __ret = (float64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \
- __ret; \
- })
- #endif
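- /* Editor-added illustration, not part of the generated header: vld1q_f64 above loads
-    two contiguous doubles, and vld1q_lane_f64 reinserts a single element at a constant
-    lane. A minimal sketch, assuming an AArch64 target and a pointer to at least two
-    valid doubles: */
- static inline float64x2_t load_and_patch(const double *p, double replacement) {
-   float64x2_t v = vld1q_f64(p);               /* v = {p[0], p[1]} */
-   return vld1q_lane_f64(&replacement, v, 1);  /* overwrite lane 1 */
- }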
- #ifdef __LITTLE_ENDIAN__
- #define vld1_p8_x2(__p0) __extension__ ({ \
- poly8x8x2_t __ret; \
- __builtin_neon_vld1_x2_v(&__ret, __p0, 4); \
- __ret; \
- })
- #else
- #define vld1_p8_x2(__p0) __extension__ ({ \
- poly8x8x2_t __ret; \
- __builtin_neon_vld1_x2_v(&__ret, __p0, 4); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_p64_x2(__p0) __extension__ ({ \
- poly64x1x2_t __ret; \
- __builtin_neon_vld1_x2_v(&__ret, __p0, 6); \
- __ret; \
- })
- #else
- #define vld1_p64_x2(__p0) __extension__ ({ \
- poly64x1x2_t __ret; \
- __builtin_neon_vld1_x2_v(&__ret, __p0, 6); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_p16_x2(__p0) __extension__ ({ \
- poly16x4x2_t __ret; \
- __builtin_neon_vld1_x2_v(&__ret, __p0, 5); \
- __ret; \
- })
- #else
- #define vld1_p16_x2(__p0) __extension__ ({ \
- poly16x4x2_t __ret; \
- __builtin_neon_vld1_x2_v(&__ret, __p0, 5); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_p8_x2(__p0) __extension__ ({ \
- poly8x16x2_t __ret; \
- __builtin_neon_vld1q_x2_v(&__ret, __p0, 36); \
- __ret; \
- })
- #else
- #define vld1q_p8_x2(__p0) __extension__ ({ \
- poly8x16x2_t __ret; \
- __builtin_neon_vld1q_x2_v(&__ret, __p0, 36); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_p64_x2(__p0) __extension__ ({ \
- poly64x2x2_t __ret; \
- __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \
- __ret; \
- })
- #else
- #define vld1q_p64_x2(__p0) __extension__ ({ \
- poly64x2x2_t __ret; \
- __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_p16_x2(__p0) __extension__ ({ \
- poly16x8x2_t __ret; \
- __builtin_neon_vld1q_x2_v(&__ret, __p0, 37); \
- __ret; \
- })
- #else
- #define vld1q_p16_x2(__p0) __extension__ ({ \
- poly16x8x2_t __ret; \
- __builtin_neon_vld1q_x2_v(&__ret, __p0, 37); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_u8_x2(__p0) __extension__ ({ \
- uint8x16x2_t __ret; \
- __builtin_neon_vld1q_x2_v(&__ret, __p0, 48); \
- __ret; \
- })
- #else
- #define vld1q_u8_x2(__p0) __extension__ ({ \
- uint8x16x2_t __ret; \
- __builtin_neon_vld1q_x2_v(&__ret, __p0, 48); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_u32_x2(__p0) __extension__ ({ \
- uint32x4x2_t __ret; \
- __builtin_neon_vld1q_x2_v(&__ret, __p0, 50); \
- __ret; \
- })
- #else
- #define vld1q_u32_x2(__p0) __extension__ ({ \
- uint32x4x2_t __ret; \
- __builtin_neon_vld1q_x2_v(&__ret, __p0, 50); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_u64_x2(__p0) __extension__ ({ \
- uint64x2x2_t __ret; \
- __builtin_neon_vld1q_x2_v(&__ret, __p0, 51); \
- __ret; \
- })
- #else
- #define vld1q_u64_x2(__p0) __extension__ ({ \
- uint64x2x2_t __ret; \
- __builtin_neon_vld1q_x2_v(&__ret, __p0, 51); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_u16_x2(__p0) __extension__ ({ \
- uint16x8x2_t __ret; \
- __builtin_neon_vld1q_x2_v(&__ret, __p0, 49); \
- __ret; \
- })
- #else
- #define vld1q_u16_x2(__p0) __extension__ ({ \
- uint16x8x2_t __ret; \
- __builtin_neon_vld1q_x2_v(&__ret, __p0, 49); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_s8_x2(__p0) __extension__ ({ \
- int8x16x2_t __ret; \
- __builtin_neon_vld1q_x2_v(&__ret, __p0, 32); \
- __ret; \
- })
- #else
- #define vld1q_s8_x2(__p0) __extension__ ({ \
- int8x16x2_t __ret; \
- __builtin_neon_vld1q_x2_v(&__ret, __p0, 32); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_f64_x2(__p0) __extension__ ({ \
- float64x2x2_t __ret; \
- __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \
- __ret; \
- })
- #else
- #define vld1q_f64_x2(__p0) __extension__ ({ \
- float64x2x2_t __ret; \
- __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_f32_x2(__p0) __extension__ ({ \
- float32x4x2_t __ret; \
- __builtin_neon_vld1q_x2_v(&__ret, __p0, 41); \
- __ret; \
- })
- #else
- #define vld1q_f32_x2(__p0) __extension__ ({ \
- float32x4x2_t __ret; \
- __builtin_neon_vld1q_x2_v(&__ret, __p0, 41); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_f16_x2(__p0) __extension__ ({ \
- float16x8x2_t __ret; \
- __builtin_neon_vld1q_x2_v(&__ret, __p0, 40); \
- __ret; \
- })
- #else
- #define vld1q_f16_x2(__p0) __extension__ ({ \
- float16x8x2_t __ret; \
- __builtin_neon_vld1q_x2_v(&__ret, __p0, 40); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_s32_x2(__p0) __extension__ ({ \
- int32x4x2_t __ret; \
- __builtin_neon_vld1q_x2_v(&__ret, __p0, 34); \
- __ret; \
- })
- #else
- #define vld1q_s32_x2(__p0) __extension__ ({ \
- int32x4x2_t __ret; \
- __builtin_neon_vld1q_x2_v(&__ret, __p0, 34); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_s64_x2(__p0) __extension__ ({ \
- int64x2x2_t __ret; \
- __builtin_neon_vld1q_x2_v(&__ret, __p0, 35); \
- __ret; \
- })
- #else
- #define vld1q_s64_x2(__p0) __extension__ ({ \
- int64x2x2_t __ret; \
- __builtin_neon_vld1q_x2_v(&__ret, __p0, 35); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_s16_x2(__p0) __extension__ ({ \
- int16x8x2_t __ret; \
- __builtin_neon_vld1q_x2_v(&__ret, __p0, 33); \
- __ret; \
- })
- #else
- #define vld1q_s16_x2(__p0) __extension__ ({ \
- int16x8x2_t __ret; \
- __builtin_neon_vld1q_x2_v(&__ret, __p0, 33); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_u8_x2(__p0) __extension__ ({ \
- uint8x8x2_t __ret; \
- __builtin_neon_vld1_x2_v(&__ret, __p0, 16); \
- __ret; \
- })
- #else
- #define vld1_u8_x2(__p0) __extension__ ({ \
- uint8x8x2_t __ret; \
- __builtin_neon_vld1_x2_v(&__ret, __p0, 16); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_u32_x2(__p0) __extension__ ({ \
- uint32x2x2_t __ret; \
- __builtin_neon_vld1_x2_v(&__ret, __p0, 18); \
- __ret; \
- })
- #else
- #define vld1_u32_x2(__p0) __extension__ ({ \
- uint32x2x2_t __ret; \
- __builtin_neon_vld1_x2_v(&__ret, __p0, 18); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_u64_x2(__p0) __extension__ ({ \
- uint64x1x2_t __ret; \
- __builtin_neon_vld1_x2_v(&__ret, __p0, 19); \
- __ret; \
- })
- #else
- #define vld1_u64_x2(__p0) __extension__ ({ \
- uint64x1x2_t __ret; \
- __builtin_neon_vld1_x2_v(&__ret, __p0, 19); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_u16_x2(__p0) __extension__ ({ \
- uint16x4x2_t __ret; \
- __builtin_neon_vld1_x2_v(&__ret, __p0, 17); \
- __ret; \
- })
- #else
- #define vld1_u16_x2(__p0) __extension__ ({ \
- uint16x4x2_t __ret; \
- __builtin_neon_vld1_x2_v(&__ret, __p0, 17); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_s8_x2(__p0) __extension__ ({ \
- int8x8x2_t __ret; \
- __builtin_neon_vld1_x2_v(&__ret, __p0, 0); \
- __ret; \
- })
- #else
- #define vld1_s8_x2(__p0) __extension__ ({ \
- int8x8x2_t __ret; \
- __builtin_neon_vld1_x2_v(&__ret, __p0, 0); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_f64_x2(__p0) __extension__ ({ \
- float64x1x2_t __ret; \
- __builtin_neon_vld1_x2_v(&__ret, __p0, 10); \
- __ret; \
- })
- #else
- #define vld1_f64_x2(__p0) __extension__ ({ \
- float64x1x2_t __ret; \
- __builtin_neon_vld1_x2_v(&__ret, __p0, 10); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_f32_x2(__p0) __extension__ ({ \
- float32x2x2_t __ret; \
- __builtin_neon_vld1_x2_v(&__ret, __p0, 9); \
- __ret; \
- })
- #else
- #define vld1_f32_x2(__p0) __extension__ ({ \
- float32x2x2_t __ret; \
- __builtin_neon_vld1_x2_v(&__ret, __p0, 9); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_f16_x2(__p0) __extension__ ({ \
- float16x4x2_t __ret; \
- __builtin_neon_vld1_x2_v(&__ret, __p0, 8); \
- __ret; \
- })
- #else
- #define vld1_f16_x2(__p0) __extension__ ({ \
- float16x4x2_t __ret; \
- __builtin_neon_vld1_x2_v(&__ret, __p0, 8); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_s32_x2(__p0) __extension__ ({ \
- int32x2x2_t __ret; \
- __builtin_neon_vld1_x2_v(&__ret, __p0, 2); \
- __ret; \
- })
- #else
- #define vld1_s32_x2(__p0) __extension__ ({ \
- int32x2x2_t __ret; \
- __builtin_neon_vld1_x2_v(&__ret, __p0, 2); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_s64_x2(__p0) __extension__ ({ \
- int64x1x2_t __ret; \
- __builtin_neon_vld1_x2_v(&__ret, __p0, 3); \
- __ret; \
- })
- #else
- #define vld1_s64_x2(__p0) __extension__ ({ \
- int64x1x2_t __ret; \
- __builtin_neon_vld1_x2_v(&__ret, __p0, 3); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_s16_x2(__p0) __extension__ ({ \
- int16x4x2_t __ret; \
- __builtin_neon_vld1_x2_v(&__ret, __p0, 1); \
- __ret; \
- })
- #else
- #define vld1_s16_x2(__p0) __extension__ ({ \
- int16x4x2_t __ret; \
- __builtin_neon_vld1_x2_v(&__ret, __p0, 1); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
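- /* Editor-added illustration, not part of the generated header: the *_x2 loaders above
-    fill a pair of registers from one contiguous block (vld1q_f32_x2 reads eight floats).
-    A minimal sketch, assuming an AArch64 target and a pointer to at least eight valid
-    floats; vaddq_f32 is assumed from elsewhere in this header: */
- static inline float32x4_t sum_eight(const float *p) {
-   float32x4x2_t pair = vld1q_f32_x2(p);       /* pair.val[0] = p[0..3], pair.val[1] = p[4..7] */
-   return vaddq_f32(pair.val[0], pair.val[1]);
- }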
- #ifdef __LITTLE_ENDIAN__
- #define vld1_p8_x3(__p0) __extension__ ({ \
- poly8x8x3_t __ret; \
- __builtin_neon_vld1_x3_v(&__ret, __p0, 4); \
- __ret; \
- })
- #else
- #define vld1_p8_x3(__p0) __extension__ ({ \
- poly8x8x3_t __ret; \
- __builtin_neon_vld1_x3_v(&__ret, __p0, 4); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_p64_x3(__p0) __extension__ ({ \
- poly64x1x3_t __ret; \
- __builtin_neon_vld1_x3_v(&__ret, __p0, 6); \
- __ret; \
- })
- #else
- #define vld1_p64_x3(__p0) __extension__ ({ \
- poly64x1x3_t __ret; \
- __builtin_neon_vld1_x3_v(&__ret, __p0, 6); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_p16_x3(__p0) __extension__ ({ \
- poly16x4x3_t __ret; \
- __builtin_neon_vld1_x3_v(&__ret, __p0, 5); \
- __ret; \
- })
- #else
- #define vld1_p16_x3(__p0) __extension__ ({ \
- poly16x4x3_t __ret; \
- __builtin_neon_vld1_x3_v(&__ret, __p0, 5); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_p8_x3(__p0) __extension__ ({ \
- poly8x16x3_t __ret; \
- __builtin_neon_vld1q_x3_v(&__ret, __p0, 36); \
- __ret; \
- })
- #else
- #define vld1q_p8_x3(__p0) __extension__ ({ \
- poly8x16x3_t __ret; \
- __builtin_neon_vld1q_x3_v(&__ret, __p0, 36); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_p64_x3(__p0) __extension__ ({ \
- poly64x2x3_t __ret; \
- __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \
- __ret; \
- })
- #else
- #define vld1q_p64_x3(__p0) __extension__ ({ \
- poly64x2x3_t __ret; \
- __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_p16_x3(__p0) __extension__ ({ \
- poly16x8x3_t __ret; \
- __builtin_neon_vld1q_x3_v(&__ret, __p0, 37); \
- __ret; \
- })
- #else
- #define vld1q_p16_x3(__p0) __extension__ ({ \
- poly16x8x3_t __ret; \
- __builtin_neon_vld1q_x3_v(&__ret, __p0, 37); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_u8_x3(__p0) __extension__ ({ \
- uint8x16x3_t __ret; \
- __builtin_neon_vld1q_x3_v(&__ret, __p0, 48); \
- __ret; \
- })
- #else
- #define vld1q_u8_x3(__p0) __extension__ ({ \
- uint8x16x3_t __ret; \
- __builtin_neon_vld1q_x3_v(&__ret, __p0, 48); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_u32_x3(__p0) __extension__ ({ \
- uint32x4x3_t __ret; \
- __builtin_neon_vld1q_x3_v(&__ret, __p0, 50); \
- __ret; \
- })
- #else
- #define vld1q_u32_x3(__p0) __extension__ ({ \
- uint32x4x3_t __ret; \
- __builtin_neon_vld1q_x3_v(&__ret, __p0, 50); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_u64_x3(__p0) __extension__ ({ \
- uint64x2x3_t __ret; \
- __builtin_neon_vld1q_x3_v(&__ret, __p0, 51); \
- __ret; \
- })
- #else
- #define vld1q_u64_x3(__p0) __extension__ ({ \
- uint64x2x3_t __ret; \
- __builtin_neon_vld1q_x3_v(&__ret, __p0, 51); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_u16_x3(__p0) __extension__ ({ \
- uint16x8x3_t __ret; \
- __builtin_neon_vld1q_x3_v(&__ret, __p0, 49); \
- __ret; \
- })
- #else
- #define vld1q_u16_x3(__p0) __extension__ ({ \
- uint16x8x3_t __ret; \
- __builtin_neon_vld1q_x3_v(&__ret, __p0, 49); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_s8_x3(__p0) __extension__ ({ \
- int8x16x3_t __ret; \
- __builtin_neon_vld1q_x3_v(&__ret, __p0, 32); \
- __ret; \
- })
- #else
- #define vld1q_s8_x3(__p0) __extension__ ({ \
- int8x16x3_t __ret; \
- __builtin_neon_vld1q_x3_v(&__ret, __p0, 32); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_f64_x3(__p0) __extension__ ({ \
- float64x2x3_t __ret; \
- __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \
- __ret; \
- })
- #else
- #define vld1q_f64_x3(__p0) __extension__ ({ \
- float64x2x3_t __ret; \
- __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_f32_x3(__p0) __extension__ ({ \
- float32x4x3_t __ret; \
- __builtin_neon_vld1q_x3_v(&__ret, __p0, 41); \
- __ret; \
- })
- #else
- #define vld1q_f32_x3(__p0) __extension__ ({ \
- float32x4x3_t __ret; \
- __builtin_neon_vld1q_x3_v(&__ret, __p0, 41); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_f16_x3(__p0) __extension__ ({ \
- float16x8x3_t __ret; \
- __builtin_neon_vld1q_x3_v(&__ret, __p0, 40); \
- __ret; \
- })
- #else
- #define vld1q_f16_x3(__p0) __extension__ ({ \
- float16x8x3_t __ret; \
- __builtin_neon_vld1q_x3_v(&__ret, __p0, 40); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_s32_x3(__p0) __extension__ ({ \
- int32x4x3_t __ret; \
- __builtin_neon_vld1q_x3_v(&__ret, __p0, 34); \
- __ret; \
- })
- #else
- #define vld1q_s32_x3(__p0) __extension__ ({ \
- int32x4x3_t __ret; \
- __builtin_neon_vld1q_x3_v(&__ret, __p0, 34); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_s64_x3(__p0) __extension__ ({ \
- int64x2x3_t __ret; \
- __builtin_neon_vld1q_x3_v(&__ret, __p0, 35); \
- __ret; \
- })
- #else
- #define vld1q_s64_x3(__p0) __extension__ ({ \
- int64x2x3_t __ret; \
- __builtin_neon_vld1q_x3_v(&__ret, __p0, 35); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_s16_x3(__p0) __extension__ ({ \
- int16x8x3_t __ret; \
- __builtin_neon_vld1q_x3_v(&__ret, __p0, 33); \
- __ret; \
- })
- #else
- #define vld1q_s16_x3(__p0) __extension__ ({ \
- int16x8x3_t __ret; \
- __builtin_neon_vld1q_x3_v(&__ret, __p0, 33); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_u8_x3(__p0) __extension__ ({ \
- uint8x8x3_t __ret; \
- __builtin_neon_vld1_x3_v(&__ret, __p0, 16); \
- __ret; \
- })
- #else
- #define vld1_u8_x3(__p0) __extension__ ({ \
- uint8x8x3_t __ret; \
- __builtin_neon_vld1_x3_v(&__ret, __p0, 16); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_u32_x3(__p0) __extension__ ({ \
- uint32x2x3_t __ret; \
- __builtin_neon_vld1_x3_v(&__ret, __p0, 18); \
- __ret; \
- })
- #else
- #define vld1_u32_x3(__p0) __extension__ ({ \
- uint32x2x3_t __ret; \
- __builtin_neon_vld1_x3_v(&__ret, __p0, 18); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_u64_x3(__p0) __extension__ ({ \
- uint64x1x3_t __ret; \
- __builtin_neon_vld1_x3_v(&__ret, __p0, 19); \
- __ret; \
- })
- #else
- #define vld1_u64_x3(__p0) __extension__ ({ \
- uint64x1x3_t __ret; \
- __builtin_neon_vld1_x3_v(&__ret, __p0, 19); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_u16_x3(__p0) __extension__ ({ \
- uint16x4x3_t __ret; \
- __builtin_neon_vld1_x3_v(&__ret, __p0, 17); \
- __ret; \
- })
- #else
- #define vld1_u16_x3(__p0) __extension__ ({ \
- uint16x4x3_t __ret; \
- __builtin_neon_vld1_x3_v(&__ret, __p0, 17); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_s8_x3(__p0) __extension__ ({ \
- int8x8x3_t __ret; \
- __builtin_neon_vld1_x3_v(&__ret, __p0, 0); \
- __ret; \
- })
- #else
- #define vld1_s8_x3(__p0) __extension__ ({ \
- int8x8x3_t __ret; \
- __builtin_neon_vld1_x3_v(&__ret, __p0, 0); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_f64_x3(__p0) __extension__ ({ \
- float64x1x3_t __ret; \
- __builtin_neon_vld1_x3_v(&__ret, __p0, 10); \
- __ret; \
- })
- #else
- #define vld1_f64_x3(__p0) __extension__ ({ \
- float64x1x3_t __ret; \
- __builtin_neon_vld1_x3_v(&__ret, __p0, 10); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_f32_x3(__p0) __extension__ ({ \
- float32x2x3_t __ret; \
- __builtin_neon_vld1_x3_v(&__ret, __p0, 9); \
- __ret; \
- })
- #else
- #define vld1_f32_x3(__p0) __extension__ ({ \
- float32x2x3_t __ret; \
- __builtin_neon_vld1_x3_v(&__ret, __p0, 9); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_f16_x3(__p0) __extension__ ({ \
- float16x4x3_t __ret; \
- __builtin_neon_vld1_x3_v(&__ret, __p0, 8); \
- __ret; \
- })
- #else
- #define vld1_f16_x3(__p0) __extension__ ({ \
- float16x4x3_t __ret; \
- __builtin_neon_vld1_x3_v(&__ret, __p0, 8); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_s32_x3(__p0) __extension__ ({ \
- int32x2x3_t __ret; \
- __builtin_neon_vld1_x3_v(&__ret, __p0, 2); \
- __ret; \
- })
- #else
- #define vld1_s32_x3(__p0) __extension__ ({ \
- int32x2x3_t __ret; \
- __builtin_neon_vld1_x3_v(&__ret, __p0, 2); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_s64_x3(__p0) __extension__ ({ \
- int64x1x3_t __ret; \
- __builtin_neon_vld1_x3_v(&__ret, __p0, 3); \
- __ret; \
- })
- #else
- #define vld1_s64_x3(__p0) __extension__ ({ \
- int64x1x3_t __ret; \
- __builtin_neon_vld1_x3_v(&__ret, __p0, 3); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_s16_x3(__p0) __extension__ ({ \
- int16x4x3_t __ret; \
- __builtin_neon_vld1_x3_v(&__ret, __p0, 1); \
- __ret; \
- })
- #else
- #define vld1_s16_x3(__p0) __extension__ ({ \
- int16x4x3_t __ret; \
- __builtin_neon_vld1_x3_v(&__ret, __p0, 1); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
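/*
 * The vld1_*_x3 / vld1q_*_x3 macros above load three consecutive vectors from
 * memory without interleaving and return them in the .val[0..2] fields of the
 * corresponding *x3_t struct.  In the big-endian variants, the extra
 * __builtin_shufflevector calls reverse the lanes of each returned vector so
 * that lane numbering matches clang's internal little-endian lane order.
 * A minimal usage sketch (assuming an AArch64 target and <arm_neon.h>; the
 * function and variable names are illustrative, not from the header):
 */
#include <arm_neon.h>

static uint8x16x3_t load_48_bytes(const uint8_t *p)
{
    /* Loads p[0..15], p[16..31] and p[32..47] into three 128-bit vectors. */
    return vld1q_u8_x3(p);
}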
- #ifdef __LITTLE_ENDIAN__
- #define vld1_p8_x4(__p0) __extension__ ({ \
- poly8x8x4_t __ret; \
- __builtin_neon_vld1_x4_v(&__ret, __p0, 4); \
- __ret; \
- })
- #else
- #define vld1_p8_x4(__p0) __extension__ ({ \
- poly8x8x4_t __ret; \
- __builtin_neon_vld1_x4_v(&__ret, __p0, 4); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_p64_x4(__p0) __extension__ ({ \
- poly64x1x4_t __ret; \
- __builtin_neon_vld1_x4_v(&__ret, __p0, 6); \
- __ret; \
- })
- #else
- #define vld1_p64_x4(__p0) __extension__ ({ \
- poly64x1x4_t __ret; \
- __builtin_neon_vld1_x4_v(&__ret, __p0, 6); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_p16_x4(__p0) __extension__ ({ \
- poly16x4x4_t __ret; \
- __builtin_neon_vld1_x4_v(&__ret, __p0, 5); \
- __ret; \
- })
- #else
- #define vld1_p16_x4(__p0) __extension__ ({ \
- poly16x4x4_t __ret; \
- __builtin_neon_vld1_x4_v(&__ret, __p0, 5); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_p8_x4(__p0) __extension__ ({ \
- poly8x16x4_t __ret; \
- __builtin_neon_vld1q_x4_v(&__ret, __p0, 36); \
- __ret; \
- })
- #else
- #define vld1q_p8_x4(__p0) __extension__ ({ \
- poly8x16x4_t __ret; \
- __builtin_neon_vld1q_x4_v(&__ret, __p0, 36); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_p64_x4(__p0) __extension__ ({ \
- poly64x2x4_t __ret; \
- __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \
- __ret; \
- })
- #else
- #define vld1q_p64_x4(__p0) __extension__ ({ \
- poly64x2x4_t __ret; \
- __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_p16_x4(__p0) __extension__ ({ \
- poly16x8x4_t __ret; \
- __builtin_neon_vld1q_x4_v(&__ret, __p0, 37); \
- __ret; \
- })
- #else
- #define vld1q_p16_x4(__p0) __extension__ ({ \
- poly16x8x4_t __ret; \
- __builtin_neon_vld1q_x4_v(&__ret, __p0, 37); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_u8_x4(__p0) __extension__ ({ \
- uint8x16x4_t __ret; \
- __builtin_neon_vld1q_x4_v(&__ret, __p0, 48); \
- __ret; \
- })
- #else
- #define vld1q_u8_x4(__p0) __extension__ ({ \
- uint8x16x4_t __ret; \
- __builtin_neon_vld1q_x4_v(&__ret, __p0, 48); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_u32_x4(__p0) __extension__ ({ \
- uint32x4x4_t __ret; \
- __builtin_neon_vld1q_x4_v(&__ret, __p0, 50); \
- __ret; \
- })
- #else
- #define vld1q_u32_x4(__p0) __extension__ ({ \
- uint32x4x4_t __ret; \
- __builtin_neon_vld1q_x4_v(&__ret, __p0, 50); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_u64_x4(__p0) __extension__ ({ \
- uint64x2x4_t __ret; \
- __builtin_neon_vld1q_x4_v(&__ret, __p0, 51); \
- __ret; \
- })
- #else
- #define vld1q_u64_x4(__p0) __extension__ ({ \
- uint64x2x4_t __ret; \
- __builtin_neon_vld1q_x4_v(&__ret, __p0, 51); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_u16_x4(__p0) __extension__ ({ \
- uint16x8x4_t __ret; \
- __builtin_neon_vld1q_x4_v(&__ret, __p0, 49); \
- __ret; \
- })
- #else
- #define vld1q_u16_x4(__p0) __extension__ ({ \
- uint16x8x4_t __ret; \
- __builtin_neon_vld1q_x4_v(&__ret, __p0, 49); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_s8_x4(__p0) __extension__ ({ \
- int8x16x4_t __ret; \
- __builtin_neon_vld1q_x4_v(&__ret, __p0, 32); \
- __ret; \
- })
- #else
- #define vld1q_s8_x4(__p0) __extension__ ({ \
- int8x16x4_t __ret; \
- __builtin_neon_vld1q_x4_v(&__ret, __p0, 32); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_f64_x4(__p0) __extension__ ({ \
- float64x2x4_t __ret; \
- __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \
- __ret; \
- })
- #else
- #define vld1q_f64_x4(__p0) __extension__ ({ \
- float64x2x4_t __ret; \
- __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_f32_x4(__p0) __extension__ ({ \
- float32x4x4_t __ret; \
- __builtin_neon_vld1q_x4_v(&__ret, __p0, 41); \
- __ret; \
- })
- #else
- #define vld1q_f32_x4(__p0) __extension__ ({ \
- float32x4x4_t __ret; \
- __builtin_neon_vld1q_x4_v(&__ret, __p0, 41); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_f16_x4(__p0) __extension__ ({ \
- float16x8x4_t __ret; \
- __builtin_neon_vld1q_x4_v(&__ret, __p0, 40); \
- __ret; \
- })
- #else
- #define vld1q_f16_x4(__p0) __extension__ ({ \
- float16x8x4_t __ret; \
- __builtin_neon_vld1q_x4_v(&__ret, __p0, 40); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_s32_x4(__p0) __extension__ ({ \
- int32x4x4_t __ret; \
- __builtin_neon_vld1q_x4_v(&__ret, __p0, 34); \
- __ret; \
- })
- #else
- #define vld1q_s32_x4(__p0) __extension__ ({ \
- int32x4x4_t __ret; \
- __builtin_neon_vld1q_x4_v(&__ret, __p0, 34); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_s64_x4(__p0) __extension__ ({ \
- int64x2x4_t __ret; \
- __builtin_neon_vld1q_x4_v(&__ret, __p0, 35); \
- __ret; \
- })
- #else
- #define vld1q_s64_x4(__p0) __extension__ ({ \
- int64x2x4_t __ret; \
- __builtin_neon_vld1q_x4_v(&__ret, __p0, 35); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1q_s16_x4(__p0) __extension__ ({ \
- int16x8x4_t __ret; \
- __builtin_neon_vld1q_x4_v(&__ret, __p0, 33); \
- __ret; \
- })
- #else
- #define vld1q_s16_x4(__p0) __extension__ ({ \
- int16x8x4_t __ret; \
- __builtin_neon_vld1q_x4_v(&__ret, __p0, 33); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_u8_x4(__p0) __extension__ ({ \
- uint8x8x4_t __ret; \
- __builtin_neon_vld1_x4_v(&__ret, __p0, 16); \
- __ret; \
- })
- #else
- #define vld1_u8_x4(__p0) __extension__ ({ \
- uint8x8x4_t __ret; \
- __builtin_neon_vld1_x4_v(&__ret, __p0, 16); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_u32_x4(__p0) __extension__ ({ \
- uint32x2x4_t __ret; \
- __builtin_neon_vld1_x4_v(&__ret, __p0, 18); \
- __ret; \
- })
- #else
- #define vld1_u32_x4(__p0) __extension__ ({ \
- uint32x2x4_t __ret; \
- __builtin_neon_vld1_x4_v(&__ret, __p0, 18); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_u64_x4(__p0) __extension__ ({ \
- uint64x1x4_t __ret; \
- __builtin_neon_vld1_x4_v(&__ret, __p0, 19); \
- __ret; \
- })
- #else
- #define vld1_u64_x4(__p0) __extension__ ({ \
- uint64x1x4_t __ret; \
- __builtin_neon_vld1_x4_v(&__ret, __p0, 19); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_u16_x4(__p0) __extension__ ({ \
- uint16x4x4_t __ret; \
- __builtin_neon_vld1_x4_v(&__ret, __p0, 17); \
- __ret; \
- })
- #else
- #define vld1_u16_x4(__p0) __extension__ ({ \
- uint16x4x4_t __ret; \
- __builtin_neon_vld1_x4_v(&__ret, __p0, 17); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_s8_x4(__p0) __extension__ ({ \
- int8x8x4_t __ret; \
- __builtin_neon_vld1_x4_v(&__ret, __p0, 0); \
- __ret; \
- })
- #else
- #define vld1_s8_x4(__p0) __extension__ ({ \
- int8x8x4_t __ret; \
- __builtin_neon_vld1_x4_v(&__ret, __p0, 0); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_f64_x4(__p0) __extension__ ({ \
- float64x1x4_t __ret; \
- __builtin_neon_vld1_x4_v(&__ret, __p0, 10); \
- __ret; \
- })
- #else
- #define vld1_f64_x4(__p0) __extension__ ({ \
- float64x1x4_t __ret; \
- __builtin_neon_vld1_x4_v(&__ret, __p0, 10); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_f32_x4(__p0) __extension__ ({ \
- float32x2x4_t __ret; \
- __builtin_neon_vld1_x4_v(&__ret, __p0, 9); \
- __ret; \
- })
- #else
- #define vld1_f32_x4(__p0) __extension__ ({ \
- float32x2x4_t __ret; \
- __builtin_neon_vld1_x4_v(&__ret, __p0, 9); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_f16_x4(__p0) __extension__ ({ \
- float16x4x4_t __ret; \
- __builtin_neon_vld1_x4_v(&__ret, __p0, 8); \
- __ret; \
- })
- #else
- #define vld1_f16_x4(__p0) __extension__ ({ \
- float16x4x4_t __ret; \
- __builtin_neon_vld1_x4_v(&__ret, __p0, 8); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_s32_x4(__p0) __extension__ ({ \
- int32x2x4_t __ret; \
- __builtin_neon_vld1_x4_v(&__ret, __p0, 2); \
- __ret; \
- })
- #else
- #define vld1_s32_x4(__p0) __extension__ ({ \
- int32x2x4_t __ret; \
- __builtin_neon_vld1_x4_v(&__ret, __p0, 2); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_s64_x4(__p0) __extension__ ({ \
- int64x1x4_t __ret; \
- __builtin_neon_vld1_x4_v(&__ret, __p0, 3); \
- __ret; \
- })
- #else
- #define vld1_s64_x4(__p0) __extension__ ({ \
- int64x1x4_t __ret; \
- __builtin_neon_vld1_x4_v(&__ret, __p0, 3); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld1_s16_x4(__p0) __extension__ ({ \
- int16x4x4_t __ret; \
- __builtin_neon_vld1_x4_v(&__ret, __p0, 1); \
- __ret; \
- })
- #else
- #define vld1_s16_x4(__p0) __extension__ ({ \
- int16x4x4_t __ret; \
- __builtin_neon_vld1_x4_v(&__ret, __p0, 1); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
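/*
 * The vld1_*_x4 / vld1q_*_x4 macros follow the same pattern as the _x3 forms,
 * but load four consecutive vectors into .val[0..3].  A small sketch
 * (AArch64, <arm_neon.h>; names are illustrative):
 */
#include <arm_neon.h>

static float32x4_t sum_four_vectors(const float *p)
{
    /* p[0..15] is read as four float32x4_t vectors, then summed lane-wise. */
    float32x4x4_t v = vld1q_f32_x4(p);
    float32x4_t acc = vaddq_f32(v.val[0], v.val[1]);
    acc = vaddq_f32(acc, v.val[2]);
    return vaddq_f32(acc, v.val[3]);
}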
- #ifdef __LITTLE_ENDIAN__
- #define vld2_p64(__p0) __extension__ ({ \
- poly64x1x2_t __ret; \
- __builtin_neon_vld2_v(&__ret, __p0, 6); \
- __ret; \
- })
- #else
- #define vld2_p64(__p0) __extension__ ({ \
- poly64x1x2_t __ret; \
- __builtin_neon_vld2_v(&__ret, __p0, 6); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_p64(__p0) __extension__ ({ \
- poly64x2x2_t __ret; \
- __builtin_neon_vld2q_v(&__ret, __p0, 38); \
- __ret; \
- })
- #else
- #define vld2q_p64(__p0) __extension__ ({ \
- poly64x2x2_t __ret; \
- __builtin_neon_vld2q_v(&__ret, __p0, 38); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_u64(__p0) __extension__ ({ \
- uint64x2x2_t __ret; \
- __builtin_neon_vld2q_v(&__ret, __p0, 51); \
- __ret; \
- })
- #else
- #define vld2q_u64(__p0) __extension__ ({ \
- uint64x2x2_t __ret; \
- __builtin_neon_vld2q_v(&__ret, __p0, 51); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_f64(__p0) __extension__ ({ \
- float64x2x2_t __ret; \
- __builtin_neon_vld2q_v(&__ret, __p0, 42); \
- __ret; \
- })
- #else
- #define vld2q_f64(__p0) __extension__ ({ \
- float64x2x2_t __ret; \
- __builtin_neon_vld2q_v(&__ret, __p0, 42); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_s64(__p0) __extension__ ({ \
- int64x2x2_t __ret; \
- __builtin_neon_vld2q_v(&__ret, __p0, 35); \
- __ret; \
- })
- #else
- #define vld2q_s64(__p0) __extension__ ({ \
- int64x2x2_t __ret; \
- __builtin_neon_vld2q_v(&__ret, __p0, 35); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2_f64(__p0) __extension__ ({ \
- float64x1x2_t __ret; \
- __builtin_neon_vld2_v(&__ret, __p0, 10); \
- __ret; \
- })
- #else
- #define vld2_f64(__p0) __extension__ ({ \
- float64x1x2_t __ret; \
- __builtin_neon_vld2_v(&__ret, __p0, 10); \
- __ret; \
- })
- #endif
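/*
 * The vld2 macros in this block cover the AArch64-only element types
 * (64-bit integers, poly64 and float64).  vld2q_<t> performs an interleaved
 * load: elements 0, 2, 4, ... go to .val[0] and elements 1, 3, 5, ... to
 * .val[1].  Sketch (AArch64, <arm_neon.h>; names illustrative):
 */
#include <arm_neon.h>

static float64x2_t even_elements(const double *p)
{
    /* De-interleaves p[0..3]: .val[0] = {p[0], p[2]}, .val[1] = {p[1], p[3]}. */
    float64x2x2_t v = vld2q_f64(p);
    return v.val[0];
}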
- #ifdef __LITTLE_ENDIAN__
- #define vld2_dup_p64(__p0) __extension__ ({ \
- poly64x1x2_t __ret; \
- __builtin_neon_vld2_dup_v(&__ret, __p0, 6); \
- __ret; \
- })
- #else
- #define vld2_dup_p64(__p0) __extension__ ({ \
- poly64x1x2_t __ret; \
- __builtin_neon_vld2_dup_v(&__ret, __p0, 6); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_dup_p8(__p0) __extension__ ({ \
- poly8x16x2_t __ret; \
- __builtin_neon_vld2q_dup_v(&__ret, __p0, 36); \
- __ret; \
- })
- #else
- #define vld2q_dup_p8(__p0) __extension__ ({ \
- poly8x16x2_t __ret; \
- __builtin_neon_vld2q_dup_v(&__ret, __p0, 36); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_dup_p64(__p0) __extension__ ({ \
- poly64x2x2_t __ret; \
- __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \
- __ret; \
- })
- #else
- #define vld2q_dup_p64(__p0) __extension__ ({ \
- poly64x2x2_t __ret; \
- __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_dup_p16(__p0) __extension__ ({ \
- poly16x8x2_t __ret; \
- __builtin_neon_vld2q_dup_v(&__ret, __p0, 37); \
- __ret; \
- })
- #else
- #define vld2q_dup_p16(__p0) __extension__ ({ \
- poly16x8x2_t __ret; \
- __builtin_neon_vld2q_dup_v(&__ret, __p0, 37); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_dup_u8(__p0) __extension__ ({ \
- uint8x16x2_t __ret; \
- __builtin_neon_vld2q_dup_v(&__ret, __p0, 48); \
- __ret; \
- })
- #else
- #define vld2q_dup_u8(__p0) __extension__ ({ \
- uint8x16x2_t __ret; \
- __builtin_neon_vld2q_dup_v(&__ret, __p0, 48); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_dup_u32(__p0) __extension__ ({ \
- uint32x4x2_t __ret; \
- __builtin_neon_vld2q_dup_v(&__ret, __p0, 50); \
- __ret; \
- })
- #else
- #define vld2q_dup_u32(__p0) __extension__ ({ \
- uint32x4x2_t __ret; \
- __builtin_neon_vld2q_dup_v(&__ret, __p0, 50); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_dup_u64(__p0) __extension__ ({ \
- uint64x2x2_t __ret; \
- __builtin_neon_vld2q_dup_v(&__ret, __p0, 51); \
- __ret; \
- })
- #else
- #define vld2q_dup_u64(__p0) __extension__ ({ \
- uint64x2x2_t __ret; \
- __builtin_neon_vld2q_dup_v(&__ret, __p0, 51); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_dup_u16(__p0) __extension__ ({ \
- uint16x8x2_t __ret; \
- __builtin_neon_vld2q_dup_v(&__ret, __p0, 49); \
- __ret; \
- })
- #else
- #define vld2q_dup_u16(__p0) __extension__ ({ \
- uint16x8x2_t __ret; \
- __builtin_neon_vld2q_dup_v(&__ret, __p0, 49); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_dup_s8(__p0) __extension__ ({ \
- int8x16x2_t __ret; \
- __builtin_neon_vld2q_dup_v(&__ret, __p0, 32); \
- __ret; \
- })
- #else
- #define vld2q_dup_s8(__p0) __extension__ ({ \
- int8x16x2_t __ret; \
- __builtin_neon_vld2q_dup_v(&__ret, __p0, 32); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_dup_f64(__p0) __extension__ ({ \
- float64x2x2_t __ret; \
- __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \
- __ret; \
- })
- #else
- #define vld2q_dup_f64(__p0) __extension__ ({ \
- float64x2x2_t __ret; \
- __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_dup_f32(__p0) __extension__ ({ \
- float32x4x2_t __ret; \
- __builtin_neon_vld2q_dup_v(&__ret, __p0, 41); \
- __ret; \
- })
- #else
- #define vld2q_dup_f32(__p0) __extension__ ({ \
- float32x4x2_t __ret; \
- __builtin_neon_vld2q_dup_v(&__ret, __p0, 41); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_dup_f16(__p0) __extension__ ({ \
- float16x8x2_t __ret; \
- __builtin_neon_vld2q_dup_v(&__ret, __p0, 40); \
- __ret; \
- })
- #else
- #define vld2q_dup_f16(__p0) __extension__ ({ \
- float16x8x2_t __ret; \
- __builtin_neon_vld2q_dup_v(&__ret, __p0, 40); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_dup_s32(__p0) __extension__ ({ \
- int32x4x2_t __ret; \
- __builtin_neon_vld2q_dup_v(&__ret, __p0, 34); \
- __ret; \
- })
- #else
- #define vld2q_dup_s32(__p0) __extension__ ({ \
- int32x4x2_t __ret; \
- __builtin_neon_vld2q_dup_v(&__ret, __p0, 34); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_dup_s64(__p0) __extension__ ({ \
- int64x2x2_t __ret; \
- __builtin_neon_vld2q_dup_v(&__ret, __p0, 35); \
- __ret; \
- })
- #else
- #define vld2q_dup_s64(__p0) __extension__ ({ \
- int64x2x2_t __ret; \
- __builtin_neon_vld2q_dup_v(&__ret, __p0, 35); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_dup_s16(__p0) __extension__ ({ \
- int16x8x2_t __ret; \
- __builtin_neon_vld2q_dup_v(&__ret, __p0, 33); \
- __ret; \
- })
- #else
- #define vld2q_dup_s16(__p0) __extension__ ({ \
- int16x8x2_t __ret; \
- __builtin_neon_vld2q_dup_v(&__ret, __p0, 33); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2_dup_f64(__p0) __extension__ ({ \
- float64x1x2_t __ret; \
- __builtin_neon_vld2_dup_v(&__ret, __p0, 10); \
- __ret; \
- })
- #else
- #define vld2_dup_f64(__p0) __extension__ ({ \
- float64x1x2_t __ret; \
- __builtin_neon_vld2_dup_v(&__ret, __p0, 10); \
- __ret; \
- })
- #endif
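/*
 * The vld2_dup / vld2q_dup macros load a single two-element structure and
 * broadcast element 0 to every lane of .val[0] and element 1 to every lane
 * of .val[1] (the LD2R replicate form on AArch64).  Sketch (AArch64,
 * <arm_neon.h>; names illustrative):
 */
#include <arm_neon.h>

static float32x4x2_t broadcast_pair(const float *p)
{
    /* .val[0] = {p[0], p[0], p[0], p[0]}, .val[1] = {p[1], p[1], p[1], p[1]}. */
    return vld2q_dup_f32(p);
}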
- #ifdef __LITTLE_ENDIAN__
- #define vld2_lane_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x1x2_t __s1 = __p1; \
- poly64x1x2_t __ret; \
- __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \
- __ret; \
- })
- #else
- #define vld2_lane_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x1x2_t __s1 = __p1; \
- poly64x1x2_t __ret; \
- __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x16x2_t __s1 = __p1; \
- poly8x16x2_t __ret; \
- __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 36); \
- __ret; \
- })
- #else
- #define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x16x2_t __s1 = __p1; \
- poly8x16x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly8x16x2_t __ret; \
- __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x2x2_t __s1 = __p1; \
- poly64x2x2_t __ret; \
- __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 38); \
- __ret; \
- })
- #else
- #define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x2x2_t __s1 = __p1; \
- poly64x2x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- poly64x2x2_t __ret; \
- __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x16x2_t __s1 = __p1; \
- uint8x16x2_t __ret; \
- __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \
- __ret; \
- })
- #else
- #define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x16x2_t __s1 = __p1; \
- uint8x16x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x16x2_t __ret; \
- __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x2x2_t __s1 = __p1; \
- uint64x2x2_t __ret; \
- __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \
- __ret; \
- })
- #else
- #define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x2x2_t __s1 = __p1; \
- uint64x2x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- uint64x2x2_t __ret; \
- __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x16x2_t __s1 = __p1; \
- int8x16x2_t __ret; \
- __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \
- __ret; \
- })
- #else
- #define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x16x2_t __s1 = __p1; \
- int8x16x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x16x2_t __ret; \
- __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x2x2_t __s1 = __p1; \
- float64x2x2_t __ret; \
- __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 42); \
- __ret; \
- })
- #else
- #define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x2x2_t __s1 = __p1; \
- float64x2x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- float64x2x2_t __ret; \
- __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 42); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x2x2_t __s1 = __p1; \
- int64x2x2_t __ret; \
- __builtin_neon_vld2q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 35); \
- __ret; \
- })
- #else
- #define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x2x2_t __s1 = __p1; \
- int64x2x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- int64x2x2_t __ret; \
- __builtin_neon_vld2q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __p2, 35); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2_lane_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x1x2_t __s1 = __p1; \
- uint64x1x2_t __ret; \
- __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \
- __ret; \
- })
- #else
- #define vld2_lane_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x1x2_t __s1 = __p1; \
- uint64x1x2_t __ret; \
- __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x1x2_t __s1 = __p1; \
- float64x1x2_t __ret; \
- __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 10); \
- __ret; \
- })
- #else
- #define vld2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x1x2_t __s1 = __p1; \
- float64x1x2_t __ret; \
- __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 10); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x1x2_t __s1 = __p1; \
- int64x1x2_t __ret; \
- __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 3); \
- __ret; \
- })
- #else
- #define vld2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x1x2_t __s1 = __p1; \
- int64x1x2_t __ret; \
- __builtin_neon_vld2_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __p2, 3); \
- __ret; \
- })
- #endif
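/*
 * The vld2_lane / vld2q_lane macros take an existing pair of vectors (__p1),
 * load one two-element structure from __p0 into lane __p2 of each vector, and
 * leave the other lanes unchanged.  In the big-endian variants the input
 * vectors are lane-reversed before the builtin and the result is reversed
 * back, so the same source-level lane is updated on either endianness.
 * Sketch (AArch64, <arm_neon.h>; names illustrative):
 */
#include <arm_neon.h>

static uint8x16x2_t insert_pair_at_lane0(uint8x16x2_t acc, const uint8_t *p)
{
    /* Writes p[0] into lane 0 of acc.val[0] and p[1] into lane 0 of acc.val[1]. */
    return vld2q_lane_u8(p, acc, 0);
}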
- #ifdef __LITTLE_ENDIAN__
- #define vld3_p64(__p0) __extension__ ({ \
- poly64x1x3_t __ret; \
- __builtin_neon_vld3_v(&__ret, __p0, 6); \
- __ret; \
- })
- #else
- #define vld3_p64(__p0) __extension__ ({ \
- poly64x1x3_t __ret; \
- __builtin_neon_vld3_v(&__ret, __p0, 6); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_p64(__p0) __extension__ ({ \
- poly64x2x3_t __ret; \
- __builtin_neon_vld3q_v(&__ret, __p0, 38); \
- __ret; \
- })
- #else
- #define vld3q_p64(__p0) __extension__ ({ \
- poly64x2x3_t __ret; \
- __builtin_neon_vld3q_v(&__ret, __p0, 38); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_u64(__p0) __extension__ ({ \
- uint64x2x3_t __ret; \
- __builtin_neon_vld3q_v(&__ret, __p0, 51); \
- __ret; \
- })
- #else
- #define vld3q_u64(__p0) __extension__ ({ \
- uint64x2x3_t __ret; \
- __builtin_neon_vld3q_v(&__ret, __p0, 51); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_f64(__p0) __extension__ ({ \
- float64x2x3_t __ret; \
- __builtin_neon_vld3q_v(&__ret, __p0, 42); \
- __ret; \
- })
- #else
- #define vld3q_f64(__p0) __extension__ ({ \
- float64x2x3_t __ret; \
- __builtin_neon_vld3q_v(&__ret, __p0, 42); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_s64(__p0) __extension__ ({ \
- int64x2x3_t __ret; \
- __builtin_neon_vld3q_v(&__ret, __p0, 35); \
- __ret; \
- })
- #else
- #define vld3q_s64(__p0) __extension__ ({ \
- int64x2x3_t __ret; \
- __builtin_neon_vld3q_v(&__ret, __p0, 35); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3_f64(__p0) __extension__ ({ \
- float64x1x3_t __ret; \
- __builtin_neon_vld3_v(&__ret, __p0, 10); \
- __ret; \
- })
- #else
- #define vld3_f64(__p0) __extension__ ({ \
- float64x1x3_t __ret; \
- __builtin_neon_vld3_v(&__ret, __p0, 10); \
- __ret; \
- })
- #endif
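/*
 * The vld3 macros here again cover the AArch64-only 64-bit element types.
 * vld3q_<t> loads interleaved triples: element 3*i goes to lane i of .val[0],
 * element 3*i+1 to lane i of .val[1], and element 3*i+2 to lane i of .val[2].
 * Sketch (AArch64, <arm_neon.h>; names illustrative):
 */
#include <arm_neon.h>

static uint64x2_t third_components(const uint64_t *p)
{
    /* p[0..5] holds two {x, y, z} triples; returns the two z values. */
    uint64x2x3_t v = vld3q_u64(p);
    return v.val[2];
}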
- #ifdef __LITTLE_ENDIAN__
- #define vld3_dup_p64(__p0) __extension__ ({ \
- poly64x1x3_t __ret; \
- __builtin_neon_vld3_dup_v(&__ret, __p0, 6); \
- __ret; \
- })
- #else
- #define vld3_dup_p64(__p0) __extension__ ({ \
- poly64x1x3_t __ret; \
- __builtin_neon_vld3_dup_v(&__ret, __p0, 6); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_dup_p8(__p0) __extension__ ({ \
- poly8x16x3_t __ret; \
- __builtin_neon_vld3q_dup_v(&__ret, __p0, 36); \
- __ret; \
- })
- #else
- #define vld3q_dup_p8(__p0) __extension__ ({ \
- poly8x16x3_t __ret; \
- __builtin_neon_vld3q_dup_v(&__ret, __p0, 36); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_dup_p64(__p0) __extension__ ({ \
- poly64x2x3_t __ret; \
- __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \
- __ret; \
- })
- #else
- #define vld3q_dup_p64(__p0) __extension__ ({ \
- poly64x2x3_t __ret; \
- __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_dup_p16(__p0) __extension__ ({ \
- poly16x8x3_t __ret; \
- __builtin_neon_vld3q_dup_v(&__ret, __p0, 37); \
- __ret; \
- })
- #else
- #define vld3q_dup_p16(__p0) __extension__ ({ \
- poly16x8x3_t __ret; \
- __builtin_neon_vld3q_dup_v(&__ret, __p0, 37); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_dup_u8(__p0) __extension__ ({ \
- uint8x16x3_t __ret; \
- __builtin_neon_vld3q_dup_v(&__ret, __p0, 48); \
- __ret; \
- })
- #else
- #define vld3q_dup_u8(__p0) __extension__ ({ \
- uint8x16x3_t __ret; \
- __builtin_neon_vld3q_dup_v(&__ret, __p0, 48); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_dup_u32(__p0) __extension__ ({ \
- uint32x4x3_t __ret; \
- __builtin_neon_vld3q_dup_v(&__ret, __p0, 50); \
- __ret; \
- })
- #else
- #define vld3q_dup_u32(__p0) __extension__ ({ \
- uint32x4x3_t __ret; \
- __builtin_neon_vld3q_dup_v(&__ret, __p0, 50); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_dup_u64(__p0) __extension__ ({ \
- uint64x2x3_t __ret; \
- __builtin_neon_vld3q_dup_v(&__ret, __p0, 51); \
- __ret; \
- })
- #else
- #define vld3q_dup_u64(__p0) __extension__ ({ \
- uint64x2x3_t __ret; \
- __builtin_neon_vld3q_dup_v(&__ret, __p0, 51); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_dup_u16(__p0) __extension__ ({ \
- uint16x8x3_t __ret; \
- __builtin_neon_vld3q_dup_v(&__ret, __p0, 49); \
- __ret; \
- })
- #else
- #define vld3q_dup_u16(__p0) __extension__ ({ \
- uint16x8x3_t __ret; \
- __builtin_neon_vld3q_dup_v(&__ret, __p0, 49); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_dup_s8(__p0) __extension__ ({ \
- int8x16x3_t __ret; \
- __builtin_neon_vld3q_dup_v(&__ret, __p0, 32); \
- __ret; \
- })
- #else
- #define vld3q_dup_s8(__p0) __extension__ ({ \
- int8x16x3_t __ret; \
- __builtin_neon_vld3q_dup_v(&__ret, __p0, 32); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_dup_f64(__p0) __extension__ ({ \
- float64x2x3_t __ret; \
- __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \
- __ret; \
- })
- #else
- #define vld3q_dup_f64(__p0) __extension__ ({ \
- float64x2x3_t __ret; \
- __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_dup_f32(__p0) __extension__ ({ \
- float32x4x3_t __ret; \
- __builtin_neon_vld3q_dup_v(&__ret, __p0, 41); \
- __ret; \
- })
- #else
- #define vld3q_dup_f32(__p0) __extension__ ({ \
- float32x4x3_t __ret; \
- __builtin_neon_vld3q_dup_v(&__ret, __p0, 41); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_dup_f16(__p0) __extension__ ({ \
- float16x8x3_t __ret; \
- __builtin_neon_vld3q_dup_v(&__ret, __p0, 40); \
- __ret; \
- })
- #else
- #define vld3q_dup_f16(__p0) __extension__ ({ \
- float16x8x3_t __ret; \
- __builtin_neon_vld3q_dup_v(&__ret, __p0, 40); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_dup_s32(__p0) __extension__ ({ \
- int32x4x3_t __ret; \
- __builtin_neon_vld3q_dup_v(&__ret, __p0, 34); \
- __ret; \
- })
- #else
- #define vld3q_dup_s32(__p0) __extension__ ({ \
- int32x4x3_t __ret; \
- __builtin_neon_vld3q_dup_v(&__ret, __p0, 34); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_dup_s64(__p0) __extension__ ({ \
- int64x2x3_t __ret; \
- __builtin_neon_vld3q_dup_v(&__ret, __p0, 35); \
- __ret; \
- })
- #else
- #define vld3q_dup_s64(__p0) __extension__ ({ \
- int64x2x3_t __ret; \
- __builtin_neon_vld3q_dup_v(&__ret, __p0, 35); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_dup_s16(__p0) __extension__ ({ \
- int16x8x3_t __ret; \
- __builtin_neon_vld3q_dup_v(&__ret, __p0, 33); \
- __ret; \
- })
- #else
- #define vld3q_dup_s16(__p0) __extension__ ({ \
- int16x8x3_t __ret; \
- __builtin_neon_vld3q_dup_v(&__ret, __p0, 33); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3_dup_f64(__p0) __extension__ ({ \
- float64x1x3_t __ret; \
- __builtin_neon_vld3_dup_v(&__ret, __p0, 10); \
- __ret; \
- })
- #else
- #define vld3_dup_f64(__p0) __extension__ ({ \
- float64x1x3_t __ret; \
- __builtin_neon_vld3_dup_v(&__ret, __p0, 10); \
- __ret; \
- })
- #endif
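/*
 * The vld3_dup / vld3q_dup macros load one three-element structure and
 * replicate each element across all lanes of the corresponding result vector,
 * mirroring the two-element _dup forms above.  Sketch (AArch64, <arm_neon.h>;
 * names illustrative):
 */
#include <arm_neon.h>

static int16x8x3_t broadcast_triple(const int16_t *p)
{
    /* .val[k] has every lane equal to p[k], for k = 0, 1, 2. */
    return vld3q_dup_s16(p);
}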
- #ifdef __LITTLE_ENDIAN__
- #define vld3_lane_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x1x3_t __s1 = __p1; \
- poly64x1x3_t __ret; \
- __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \
- __ret; \
- })
- #else
- #define vld3_lane_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x1x3_t __s1 = __p1; \
- poly64x1x3_t __ret; \
- __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x16x3_t __s1 = __p1; \
- poly8x16x3_t __ret; \
- __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \
- __ret; \
- })
- #else
- #define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x16x3_t __s1 = __p1; \
- poly8x16x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly8x16x3_t __ret; \
- __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x2x3_t __s1 = __p1; \
- poly64x2x3_t __ret; \
- __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \
- __ret; \
- })
- #else
- #define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x2x3_t __s1 = __p1; \
- poly64x2x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- poly64x2x3_t __ret; \
- __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 38); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x16x3_t __s1 = __p1; \
- uint8x16x3_t __ret; \
- __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \
- __ret; \
- })
- #else
- #define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x16x3_t __s1 = __p1; \
- uint8x16x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x16x3_t __ret; \
- __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x2x3_t __s1 = __p1; \
- uint64x2x3_t __ret; \
- __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \
- __ret; \
- })
- #else
- #define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x2x3_t __s1 = __p1; \
- uint64x2x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- uint64x2x3_t __ret; \
- __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x16x3_t __s1 = __p1; \
- int8x16x3_t __ret; \
- __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \
- __ret; \
- })
- #else
- #define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x16x3_t __s1 = __p1; \
- int8x16x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x16x3_t __ret; \
- __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x2x3_t __s1 = __p1; \
- float64x2x3_t __ret; \
- __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 42); \
- __ret; \
- })
- #else
- #define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x2x3_t __s1 = __p1; \
- float64x2x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- float64x2x3_t __ret; \
- __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 42); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x2x3_t __s1 = __p1; \
- int64x2x3_t __ret; \
- __builtin_neon_vld3q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 35); \
- __ret; \
- })
- #else
- #define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x2x3_t __s1 = __p1; \
- int64x2x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- int64x2x3_t __ret; \
- __builtin_neon_vld3q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 35); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3_lane_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x1x3_t __s1 = __p1; \
- uint64x1x3_t __ret; \
- __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \
- __ret; \
- })
- #else
- #define vld3_lane_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x1x3_t __s1 = __p1; \
- uint64x1x3_t __ret; \
- __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x1x3_t __s1 = __p1; \
- float64x1x3_t __ret; \
- __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 10); \
- __ret; \
- })
- #else
- #define vld3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x1x3_t __s1 = __p1; \
- float64x1x3_t __ret; \
- __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 10); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x1x3_t __s1 = __p1; \
- int64x1x3_t __ret; \
- __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 3); \
- __ret; \
- })
- #else
- #define vld3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x1x3_t __s1 = __p1; \
- int64x1x3_t __ret; \
- __builtin_neon_vld3_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 3); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_p64(__p0) __extension__ ({ \
- poly64x1x4_t __ret; \
- __builtin_neon_vld4_v(&__ret, __p0, 6); \
- __ret; \
- })
- #else
- #define vld4_p64(__p0) __extension__ ({ \
- poly64x1x4_t __ret; \
- __builtin_neon_vld4_v(&__ret, __p0, 6); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_p64(__p0) __extension__ ({ \
- poly64x2x4_t __ret; \
- __builtin_neon_vld4q_v(&__ret, __p0, 38); \
- __ret; \
- })
- #else
- #define vld4q_p64(__p0) __extension__ ({ \
- poly64x2x4_t __ret; \
- __builtin_neon_vld4q_v(&__ret, __p0, 38); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_u64(__p0) __extension__ ({ \
- uint64x2x4_t __ret; \
- __builtin_neon_vld4q_v(&__ret, __p0, 51); \
- __ret; \
- })
- #else
- #define vld4q_u64(__p0) __extension__ ({ \
- uint64x2x4_t __ret; \
- __builtin_neon_vld4q_v(&__ret, __p0, 51); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_f64(__p0) __extension__ ({ \
- float64x2x4_t __ret; \
- __builtin_neon_vld4q_v(&__ret, __p0, 42); \
- __ret; \
- })
- #else
- #define vld4q_f64(__p0) __extension__ ({ \
- float64x2x4_t __ret; \
- __builtin_neon_vld4q_v(&__ret, __p0, 42); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_s64(__p0) __extension__ ({ \
- int64x2x4_t __ret; \
- __builtin_neon_vld4q_v(&__ret, __p0, 35); \
- __ret; \
- })
- #else
- #define vld4q_s64(__p0) __extension__ ({ \
- int64x2x4_t __ret; \
- __builtin_neon_vld4q_v(&__ret, __p0, 35); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_f64(__p0) __extension__ ({ \
- float64x1x4_t __ret; \
- __builtin_neon_vld4_v(&__ret, __p0, 10); \
- __ret; \
- })
- #else
- #define vld4_f64(__p0) __extension__ ({ \
- float64x1x4_t __ret; \
- __builtin_neon_vld4_v(&__ret, __p0, 10); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_dup_p64(__p0) __extension__ ({ \
- poly64x1x4_t __ret; \
- __builtin_neon_vld4_dup_v(&__ret, __p0, 6); \
- __ret; \
- })
- #else
- #define vld4_dup_p64(__p0) __extension__ ({ \
- poly64x1x4_t __ret; \
- __builtin_neon_vld4_dup_v(&__ret, __p0, 6); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_dup_p8(__p0) __extension__ ({ \
- poly8x16x4_t __ret; \
- __builtin_neon_vld4q_dup_v(&__ret, __p0, 36); \
- __ret; \
- })
- #else
- #define vld4q_dup_p8(__p0) __extension__ ({ \
- poly8x16x4_t __ret; \
- __builtin_neon_vld4q_dup_v(&__ret, __p0, 36); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_dup_p64(__p0) __extension__ ({ \
- poly64x2x4_t __ret; \
- __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \
- __ret; \
- })
- #else
- #define vld4q_dup_p64(__p0) __extension__ ({ \
- poly64x2x4_t __ret; \
- __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_dup_p16(__p0) __extension__ ({ \
- poly16x8x4_t __ret; \
- __builtin_neon_vld4q_dup_v(&__ret, __p0, 37); \
- __ret; \
- })
- #else
- #define vld4q_dup_p16(__p0) __extension__ ({ \
- poly16x8x4_t __ret; \
- __builtin_neon_vld4q_dup_v(&__ret, __p0, 37); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_dup_u8(__p0) __extension__ ({ \
- uint8x16x4_t __ret; \
- __builtin_neon_vld4q_dup_v(&__ret, __p0, 48); \
- __ret; \
- })
- #else
- #define vld4q_dup_u8(__p0) __extension__ ({ \
- uint8x16x4_t __ret; \
- __builtin_neon_vld4q_dup_v(&__ret, __p0, 48); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_dup_u32(__p0) __extension__ ({ \
- uint32x4x4_t __ret; \
- __builtin_neon_vld4q_dup_v(&__ret, __p0, 50); \
- __ret; \
- })
- #else
- #define vld4q_dup_u32(__p0) __extension__ ({ \
- uint32x4x4_t __ret; \
- __builtin_neon_vld4q_dup_v(&__ret, __p0, 50); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_dup_u64(__p0) __extension__ ({ \
- uint64x2x4_t __ret; \
- __builtin_neon_vld4q_dup_v(&__ret, __p0, 51); \
- __ret; \
- })
- #else
- #define vld4q_dup_u64(__p0) __extension__ ({ \
- uint64x2x4_t __ret; \
- __builtin_neon_vld4q_dup_v(&__ret, __p0, 51); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_dup_u16(__p0) __extension__ ({ \
- uint16x8x4_t __ret; \
- __builtin_neon_vld4q_dup_v(&__ret, __p0, 49); \
- __ret; \
- })
- #else
- #define vld4q_dup_u16(__p0) __extension__ ({ \
- uint16x8x4_t __ret; \
- __builtin_neon_vld4q_dup_v(&__ret, __p0, 49); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_dup_s8(__p0) __extension__ ({ \
- int8x16x4_t __ret; \
- __builtin_neon_vld4q_dup_v(&__ret, __p0, 32); \
- __ret; \
- })
- #else
- #define vld4q_dup_s8(__p0) __extension__ ({ \
- int8x16x4_t __ret; \
- __builtin_neon_vld4q_dup_v(&__ret, __p0, 32); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_dup_f64(__p0) __extension__ ({ \
- float64x2x4_t __ret; \
- __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \
- __ret; \
- })
- #else
- #define vld4q_dup_f64(__p0) __extension__ ({ \
- float64x2x4_t __ret; \
- __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_dup_f32(__p0) __extension__ ({ \
- float32x4x4_t __ret; \
- __builtin_neon_vld4q_dup_v(&__ret, __p0, 41); \
- __ret; \
- })
- #else
- #define vld4q_dup_f32(__p0) __extension__ ({ \
- float32x4x4_t __ret; \
- __builtin_neon_vld4q_dup_v(&__ret, __p0, 41); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_dup_f16(__p0) __extension__ ({ \
- float16x8x4_t __ret; \
- __builtin_neon_vld4q_dup_v(&__ret, __p0, 40); \
- __ret; \
- })
- #else
- #define vld4q_dup_f16(__p0) __extension__ ({ \
- float16x8x4_t __ret; \
- __builtin_neon_vld4q_dup_v(&__ret, __p0, 40); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_dup_s32(__p0) __extension__ ({ \
- int32x4x4_t __ret; \
- __builtin_neon_vld4q_dup_v(&__ret, __p0, 34); \
- __ret; \
- })
- #else
- #define vld4q_dup_s32(__p0) __extension__ ({ \
- int32x4x4_t __ret; \
- __builtin_neon_vld4q_dup_v(&__ret, __p0, 34); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_dup_s64(__p0) __extension__ ({ \
- int64x2x4_t __ret; \
- __builtin_neon_vld4q_dup_v(&__ret, __p0, 35); \
- __ret; \
- })
- #else
- #define vld4q_dup_s64(__p0) __extension__ ({ \
- int64x2x4_t __ret; \
- __builtin_neon_vld4q_dup_v(&__ret, __p0, 35); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_dup_s16(__p0) __extension__ ({ \
- int16x8x4_t __ret; \
- __builtin_neon_vld4q_dup_v(&__ret, __p0, 33); \
- __ret; \
- })
- #else
- #define vld4q_dup_s16(__p0) __extension__ ({ \
- int16x8x4_t __ret; \
- __builtin_neon_vld4q_dup_v(&__ret, __p0, 33); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_dup_f64(__p0) __extension__ ({ \
- float64x1x4_t __ret; \
- __builtin_neon_vld4_dup_v(&__ret, __p0, 10); \
- __ret; \
- })
- #else
- #define vld4_dup_f64(__p0) __extension__ ({ \
- float64x1x4_t __ret; \
- __builtin_neon_vld4_dup_v(&__ret, __p0, 10); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_lane_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x1x4_t __s1 = __p1; \
- poly64x1x4_t __ret; \
- __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \
- __ret; \
- })
- #else
- #define vld4_lane_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x1x4_t __s1 = __p1; \
- poly64x1x4_t __ret; \
- __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x16x4_t __s1 = __p1; \
- poly8x16x4_t __ret; \
- __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \
- __ret; \
- })
- #else
- #define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x16x4_t __s1 = __p1; \
- poly8x16x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- poly8x16x4_t __ret; \
- __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 36); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x2x4_t __s1 = __p1; \
- poly64x2x4_t __ret; \
- __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \
- __ret; \
- })
- #else
- #define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x2x4_t __s1 = __p1; \
- poly64x2x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
- poly64x2x4_t __ret; \
- __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 38); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x16x4_t __s1 = __p1; \
- uint8x16x4_t __ret; \
- __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \
- __ret; \
- })
- #else
- #define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x16x4_t __s1 = __p1; \
- uint8x16x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x16x4_t __ret; \
- __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x2x4_t __s1 = __p1; \
- uint64x2x4_t __ret; \
- __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \
- __ret; \
- })
- #else
- #define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x2x4_t __s1 = __p1; \
- uint64x2x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
- uint64x2x4_t __ret; \
- __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x16x4_t __s1 = __p1; \
- int8x16x4_t __ret; \
- __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 32); \
- __ret; \
- })
- #else
- #define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x16x4_t __s1 = __p1; \
- int8x16x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x16x4_t __ret; \
- __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 32); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x2x4_t __s1 = __p1; \
- float64x2x4_t __ret; \
- __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 42); \
- __ret; \
- })
- #else
- #define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x2x4_t __s1 = __p1; \
- float64x2x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
- float64x2x4_t __ret; \
- __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 42); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x2x4_t __s1 = __p1; \
- int64x2x4_t __ret; \
- __builtin_neon_vld4q_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 35); \
- __ret; \
- })
- #else
- #define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x2x4_t __s1 = __p1; \
- int64x2x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
- int64x2x4_t __ret; \
- __builtin_neon_vld4q_lane_v(&__ret, __p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 35); \
- \
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
- __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
- __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
- __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_lane_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x1x4_t __s1 = __p1; \
- uint64x1x4_t __ret; \
- __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \
- __ret; \
- })
- #else
- #define vld4_lane_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x1x4_t __s1 = __p1; \
- uint64x1x4_t __ret; \
- __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x1x4_t __s1 = __p1; \
- float64x1x4_t __ret; \
- __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 10); \
- __ret; \
- })
- #else
- #define vld4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x1x4_t __s1 = __p1; \
- float64x1x4_t __ret; \
- __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 10); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vld4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x1x4_t __s1 = __p1; \
- int64x1x4_t __ret; \
- __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 3); \
- __ret; \
- })
- #else
- #define vld4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x1x4_t __s1 = __p1; \
- int64x1x4_t __ret; \
- __builtin_neon_vld4_lane_v(&__ret, __p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 3); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vldrq_p128(__p0) __extension__ ({ \
- poly128_t __ret; \
- __ret = (poly128_t) __builtin_neon_vldrq_p128(__p0); \
- __ret; \
- })
- #else
- #define vldrq_p128(__p0) __extension__ ({ \
- poly128_t __ret; \
- __ret = (poly128_t) __builtin_neon_vldrq_p128(__p0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
- return __ret;
- }
- #else
- __ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vmax_f64(float64x1_t __p0, float64x1_t __p1) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
- return __ret;
- }
- #else
- __ai float64x1_t vmax_f64(float64x1_t __p0, float64x1_t __p1) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64_t vmaxnmvq_f64(float64x2_t __p0) {
- float64_t __ret;
- __ret = (float64_t) __builtin_neon_vmaxnmvq_f64((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai float64_t vmaxnmvq_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64_t __ret;
- __ret = (float64_t) __builtin_neon_vmaxnmvq_f64((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32_t vmaxnmvq_f32(float32x4_t __p0) {
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vmaxnmvq_f32((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai float32_t vmaxnmvq_f32(float32x4_t __p0) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vmaxnmvq_f32((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32_t vmaxnmv_f32(float32x2_t __p0) {
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vmaxnmv_f32((int8x8_t)__p0);
- return __ret;
- }
- #else
- __ai float32_t vmaxnmv_f32(float32x2_t __p0) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vmaxnmv_f32((int8x8_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8_t vmaxvq_u8(uint8x16_t __p0) {
- uint8_t __ret;
- __ret = (uint8_t) __builtin_neon_vmaxvq_u8((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai uint8_t vmaxvq_u8(uint8x16_t __p0) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8_t __ret;
- __ret = (uint8_t) __builtin_neon_vmaxvq_u8((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32_t vmaxvq_u32(uint32x4_t __p0) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vmaxvq_u32((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai uint32_t vmaxvq_u32(uint32x4_t __p0) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vmaxvq_u32((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16_t vmaxvq_u16(uint16x8_t __p0) {
- uint16_t __ret;
- __ret = (uint16_t) __builtin_neon_vmaxvq_u16((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai uint16_t vmaxvq_u16(uint16x8_t __p0) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16_t __ret;
- __ret = (uint16_t) __builtin_neon_vmaxvq_u16((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8_t vmaxvq_s8(int8x16_t __p0) {
- int8_t __ret;
- __ret = (int8_t) __builtin_neon_vmaxvq_s8((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai int8_t vmaxvq_s8(int8x16_t __p0) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8_t __ret;
- __ret = (int8_t) __builtin_neon_vmaxvq_s8((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64_t vmaxvq_f64(float64x2_t __p0) {
- float64_t __ret;
- __ret = (float64_t) __builtin_neon_vmaxvq_f64((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai float64_t vmaxvq_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64_t __ret;
- __ret = (float64_t) __builtin_neon_vmaxvq_f64((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32_t vmaxvq_f32(float32x4_t __p0) {
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vmaxvq_f32((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai float32_t vmaxvq_f32(float32x4_t __p0) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vmaxvq_f32((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32_t vmaxvq_s32(int32x4_t __p0) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vmaxvq_s32((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai int32_t vmaxvq_s32(int32x4_t __p0) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vmaxvq_s32((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16_t vmaxvq_s16(int16x8_t __p0) {
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vmaxvq_s16((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai int16_t vmaxvq_s16(int16x8_t __p0) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vmaxvq_s16((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8_t vmaxv_u8(uint8x8_t __p0) {
- uint8_t __ret;
- __ret = (uint8_t) __builtin_neon_vmaxv_u8((int8x8_t)__p0);
- return __ret;
- }
- #else
- __ai uint8_t vmaxv_u8(uint8x8_t __p0) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8_t __ret;
- __ret = (uint8_t) __builtin_neon_vmaxv_u8((int8x8_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32_t vmaxv_u32(uint32x2_t __p0) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vmaxv_u32((int8x8_t)__p0);
- return __ret;
- }
- #else
- __ai uint32_t vmaxv_u32(uint32x2_t __p0) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vmaxv_u32((int8x8_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16_t vmaxv_u16(uint16x4_t __p0) {
- uint16_t __ret;
- __ret = (uint16_t) __builtin_neon_vmaxv_u16((int8x8_t)__p0);
- return __ret;
- }
- #else
- __ai uint16_t vmaxv_u16(uint16x4_t __p0) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16_t __ret;
- __ret = (uint16_t) __builtin_neon_vmaxv_u16((int8x8_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8_t vmaxv_s8(int8x8_t __p0) {
- int8_t __ret;
- __ret = (int8_t) __builtin_neon_vmaxv_s8((int8x8_t)__p0);
- return __ret;
- }
- #else
- __ai int8_t vmaxv_s8(int8x8_t __p0) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8_t __ret;
- __ret = (int8_t) __builtin_neon_vmaxv_s8((int8x8_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32_t vmaxv_f32(float32x2_t __p0) {
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vmaxv_f32((int8x8_t)__p0);
- return __ret;
- }
- #else
- __ai float32_t vmaxv_f32(float32x2_t __p0) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vmaxv_f32((int8x8_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32_t vmaxv_s32(int32x2_t __p0) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vmaxv_s32((int8x8_t)__p0);
- return __ret;
- }
- #else
- __ai int32_t vmaxv_s32(int32x2_t __p0) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vmaxv_s32((int8x8_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16_t vmaxv_s16(int16x4_t __p0) {
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vmaxv_s16((int8x8_t)__p0);
- return __ret;
- }
- #else
- __ai int16_t vmaxv_s16(int16x4_t __p0) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vmaxv_s16((int8x8_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
- return __ret;
- }
- #else
- __ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vmin_f64(float64x1_t __p0, float64x1_t __p1) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
- return __ret;
- }
- #else
- __ai float64x1_t vmin_f64(float64x1_t __p0, float64x1_t __p1) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64_t vminnmvq_f64(float64x2_t __p0) {
- float64_t __ret;
- __ret = (float64_t) __builtin_neon_vminnmvq_f64((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai float64_t vminnmvq_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64_t __ret;
- __ret = (float64_t) __builtin_neon_vminnmvq_f64((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32_t vminnmvq_f32(float32x4_t __p0) {
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vminnmvq_f32((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai float32_t vminnmvq_f32(float32x4_t __p0) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vminnmvq_f32((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32_t vminnmv_f32(float32x2_t __p0) {
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vminnmv_f32((int8x8_t)__p0);
- return __ret;
- }
- #else
- __ai float32_t vminnmv_f32(float32x2_t __p0) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vminnmv_f32((int8x8_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8_t vminvq_u8(uint8x16_t __p0) {
- uint8_t __ret;
- __ret = (uint8_t) __builtin_neon_vminvq_u8((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai uint8_t vminvq_u8(uint8x16_t __p0) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8_t __ret;
- __ret = (uint8_t) __builtin_neon_vminvq_u8((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32_t vminvq_u32(uint32x4_t __p0) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vminvq_u32((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai uint32_t vminvq_u32(uint32x4_t __p0) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vminvq_u32((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16_t vminvq_u16(uint16x8_t __p0) {
- uint16_t __ret;
- __ret = (uint16_t) __builtin_neon_vminvq_u16((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai uint16_t vminvq_u16(uint16x8_t __p0) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16_t __ret;
- __ret = (uint16_t) __builtin_neon_vminvq_u16((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8_t vminvq_s8(int8x16_t __p0) {
- int8_t __ret;
- __ret = (int8_t) __builtin_neon_vminvq_s8((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai int8_t vminvq_s8(int8x16_t __p0) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8_t __ret;
- __ret = (int8_t) __builtin_neon_vminvq_s8((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64_t vminvq_f64(float64x2_t __p0) {
- float64_t __ret;
- __ret = (float64_t) __builtin_neon_vminvq_f64((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai float64_t vminvq_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64_t __ret;
- __ret = (float64_t) __builtin_neon_vminvq_f64((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32_t vminvq_f32(float32x4_t __p0) {
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vminvq_f32((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai float32_t vminvq_f32(float32x4_t __p0) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vminvq_f32((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32_t vminvq_s32(int32x4_t __p0) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vminvq_s32((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai int32_t vminvq_s32(int32x4_t __p0) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vminvq_s32((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16_t vminvq_s16(int16x8_t __p0) {
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vminvq_s16((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai int16_t vminvq_s16(int16x8_t __p0) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vminvq_s16((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8_t vminv_u8(uint8x8_t __p0) {
- uint8_t __ret;
- __ret = (uint8_t) __builtin_neon_vminv_u8((int8x8_t)__p0);
- return __ret;
- }
- #else
- __ai uint8_t vminv_u8(uint8x8_t __p0) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8_t __ret;
- __ret = (uint8_t) __builtin_neon_vminv_u8((int8x8_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32_t vminv_u32(uint32x2_t __p0) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vminv_u32((int8x8_t)__p0);
- return __ret;
- }
- #else
- __ai uint32_t vminv_u32(uint32x2_t __p0) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vminv_u32((int8x8_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16_t vminv_u16(uint16x4_t __p0) {
- uint16_t __ret;
- __ret = (uint16_t) __builtin_neon_vminv_u16((int8x8_t)__p0);
- return __ret;
- }
- #else
- __ai uint16_t vminv_u16(uint16x4_t __p0) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16_t __ret;
- __ret = (uint16_t) __builtin_neon_vminv_u16((int8x8_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8_t vminv_s8(int8x8_t __p0) {
- int8_t __ret;
- __ret = (int8_t) __builtin_neon_vminv_s8((int8x8_t)__p0);
- return __ret;
- }
- #else
- __ai int8_t vminv_s8(int8x8_t __p0) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8_t __ret;
- __ret = (int8_t) __builtin_neon_vminv_s8((int8x8_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32_t vminv_f32(float32x2_t __p0) {
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vminv_f32((int8x8_t)__p0);
- return __ret;
- }
- #else
- __ai float32_t vminv_f32(float32x2_t __p0) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vminv_f32((int8x8_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32_t vminv_s32(int32x2_t __p0) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vminv_s32((int8x8_t)__p0);
- return __ret;
- }
- #else
- __ai int32_t vminv_s32(int32x2_t __p0) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vminv_s32((int8x8_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16_t vminv_s16(int16x4_t __p0) {
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vminv_s16((int8x8_t)__p0);
- return __ret;
- }
- #else
- __ai int16_t vminv_s16(int16x4_t __p0) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vminv_s16((int8x8_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
- float64x2_t __ret;
- __ret = __p0 + __p1 * __p2;
- return __ret;
- }
- #else
- __ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
- float64x2_t __ret;
- __ret = __rev0 + __rev1 * __rev2;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
- float64x1_t __ret;
- __ret = __p0 + __p1 * __p2;
- return __ret;
- }
- #else
- __ai float64x1_t vmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
- float64x1_t __ret;
- __ret = __p0 + __p1 * __p2;
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlaq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __s1 = __p1; \
- uint32x4_t __s2 = __p2; \
- uint32x4_t __ret; \
- __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmlaq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __s1 = __p1; \
- uint32x4_t __s2 = __p2; \
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- uint32x4_t __ret; \
- __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlaq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __s1 = __p1; \
- uint16x8_t __s2 = __p2; \
- uint16x8_t __ret; \
- __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmlaq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __s1 = __p1; \
- uint16x8_t __s2 = __p2; \
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __ret; \
- __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
- float32x4_t __s0 = __p0; \
- float32x4_t __s1 = __p1; \
- float32x4_t __s2 = __p2; \
- float32x4_t __ret; \
- __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmlaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
- float32x4_t __s0 = __p0; \
- float32x4_t __s1 = __p1; \
- float32x4_t __s2 = __p2; \
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- float32x4_t __ret; \
- __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
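A minimal usage sketch for vmlaq_laneq_f32 as defined above (the two branches only keep the lane index referring to the same architectural lane on either endianness). Assumes an AArch64 target with this header included; the helper name axpy_lane3 and the lane choice are illustrative, not part of the header.

static inline float32x4_t axpy_lane3(float32x4_t acc, float32x4_t x, float32x4_t coeffs) {
  /* per lane: acc[i] + x[i] * coeffs[3] */
  return vmlaq_laneq_f32(acc, x, coeffs, 3);
}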
- #ifdef __LITTLE_ENDIAN__
- #define vmlaq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x4_t __s2 = __p2; \
- int32x4_t __ret; \
- __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmlaq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x4_t __s2 = __p2; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlaq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x8_t __s2 = __p2; \
- int16x8_t __ret; \
- __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmlaq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x8_t __s2 = __p2; \
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __ret; \
- __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmla_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x2_t __s1 = __p1; \
- uint32x4_t __s2 = __p2; \
- uint32x2_t __ret; \
- __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmla_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x2_t __s1 = __p1; \
- uint32x4_t __s2 = __p2; \
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- uint32x2_t __ret; \
- __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmla_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x4_t __s1 = __p1; \
- uint16x8_t __s2 = __p2; \
- uint16x4_t __ret; \
- __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmla_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x4_t __s1 = __p1; \
- uint16x8_t __s2 = __p2; \
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x4_t __ret; \
- __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmla_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
- float32x2_t __s0 = __p0; \
- float32x2_t __s1 = __p1; \
- float32x4_t __s2 = __p2; \
- float32x2_t __ret; \
- __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmla_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
- float32x2_t __s0 = __p0; \
- float32x2_t __s1 = __p1; \
- float32x4_t __s2 = __p2; \
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- float32x2_t __ret; \
- __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmla_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x4_t __s2 = __p2; \
- int32x2_t __ret; \
- __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmla_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x4_t __s2 = __p2; \
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- int32x2_t __ret; \
- __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmla_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x8_t __s2 = __p2; \
- int16x4_t __ret; \
- __ret = __s0 + __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmla_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x8_t __s2 = __p2; \
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x4_t __ret; \
- __ret = __rev0 + __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vmlaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
- float64x2_t __ret;
- __ret = __p0 + __p1 * (float64x2_t) {__p2, __p2};
- return __ret;
- }
- #else
- __ai float64x2_t vmlaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float64x2_t __ret;
- __ret = __rev0 + __rev1 * (float64x2_t) {__p2, __p2};
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
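A usage sketch for vmlaq_n_f64 as defined above; note that the body is a plain multiply followed by an add, not a call to a fused multiply-add builtin. The helper name is illustrative only, assuming an AArch64 target.

static inline float64x2_t mla_by_scalar(float64x2_t acc, float64x2_t x, float64_t k) {
  /* acc + x * {k, k}, matching the expression in the definition above */
  return vmlaq_n_f64(acc, x, k);
}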
- #ifdef __LITTLE_ENDIAN__
- #define vmlal_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint32x4_t __s1 = __p1; \
- uint32x2_t __s2 = __p2; \
- uint64x2_t __ret; \
- __ret = __s0 + vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vmlal_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint32x4_t __s1 = __p1; \
- uint32x2_t __s2 = __p2; \
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
- uint64x2_t __ret; \
- __ret = __rev0 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
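A usage sketch for vmlal_high_lane_u32 as defined above: the upper two u32 lanes of the second operand are widened, multiplied by one broadcast lane of the third operand, and accumulated into the 64-bit accumulator. Helper name and lane index are illustrative, assuming an AArch64 target.

static inline uint64x2_t widening_mac_high(uint64x2_t acc, uint32x4_t a, uint32x2_t b) {
  /* acc + (uint64_t)a[2..3] * (uint64_t)b[1] */
  return vmlal_high_lane_u32(acc, a, b, 1);
}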
- #ifdef __LITTLE_ENDIAN__
- #define vmlal_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint16x8_t __s1 = __p1; \
- uint16x4_t __s2 = __p2; \
- uint32x4_t __ret; \
- __ret = __s0 + vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vmlal_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint16x8_t __s1 = __p1; \
- uint16x4_t __s2 = __p2; \
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- uint32x4_t __ret; \
- __ret = __rev0 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x2_t __s2 = __p2; \
- int64x2_t __ret; \
- __ret = __s0 + vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x2_t __s2 = __p2; \
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
- int64x2_t __ret; \
- __ret = __rev0 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x4_t __s2 = __p2; \
- int32x4_t __ret; \
- __ret = __s0 + vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x4_t __s2 = __p2; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = __rev0 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlal_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint32x4_t __s1 = __p1; \
- uint32x4_t __s2 = __p2; \
- uint64x2_t __ret; \
- __ret = __s0 + vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vmlal_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint32x4_t __s1 = __p1; \
- uint32x4_t __s2 = __p2; \
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- uint64x2_t __ret; \
- __ret = __rev0 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlal_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint16x8_t __s1 = __p1; \
- uint16x8_t __s2 = __p2; \
- uint32x4_t __ret; \
- __ret = __s0 + vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vmlal_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint16x8_t __s1 = __p1; \
- uint16x8_t __s2 = __p2; \
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint32x4_t __ret; \
- __ret = __rev0 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x4_t __s2 = __p2; \
- int64x2_t __ret; \
- __ret = __s0 + vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x4_t __s2 = __p2; \
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- int64x2_t __ret; \
- __ret = __rev0 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x8_t __s2 = __p2; \
- int32x4_t __ret; \
- __ret = __s0 + vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x8_t __s2 = __p2; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = __rev0 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlal_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint32x2_t __s1 = __p1; \
- uint32x4_t __s2 = __p2; \
- uint64x2_t __ret; \
- __ret = __s0 + vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vmlal_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint32x2_t __s1 = __p1; \
- uint32x4_t __s2 = __p2; \
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- uint64x2_t __ret; \
- __ret = __rev0 + __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlal_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint16x4_t __s1 = __p1; \
- uint16x8_t __s2 = __p2; \
- uint32x4_t __ret; \
- __ret = __s0 + vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vmlal_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint16x4_t __s1 = __p1; \
- uint16x8_t __s2 = __p2; \
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint32x4_t __ret; \
- __ret = __rev0 + __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x4_t __s2 = __p2; \
- int64x2_t __ret; \
- __ret = __s0 + vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x4_t __s2 = __p2; \
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- int64x2_t __ret; \
- __ret = __rev0 + __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x8_t __s2 = __p2; \
- int32x4_t __ret; \
- __ret = __s0 + vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x8_t __s2 = __p2; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = __rev0 + __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
- float64x2_t __ret;
- __ret = __p0 - __p1 * __p2;
- return __ret;
- }
- #else
- __ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
- float64x2_t __ret;
- __ret = __rev0 - __rev1 * __rev2;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vmls_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
- float64x1_t __ret;
- __ret = __p0 - __p1 * __p2;
- return __ret;
- }
- #else
- __ai float64x1_t vmls_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
- float64x1_t __ret;
- __ret = __p0 - __p1 * __p2;
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlsq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __s1 = __p1; \
- uint32x4_t __s2 = __p2; \
- uint32x4_t __ret; \
- __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmlsq_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __s1 = __p1; \
- uint32x4_t __s2 = __p2; \
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- uint32x4_t __ret; \
- __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlsq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __s1 = __p1; \
- uint16x8_t __s2 = __p2; \
- uint16x8_t __ret; \
- __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmlsq_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __s1 = __p1; \
- uint16x8_t __s2 = __p2; \
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __ret; \
- __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlsq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
- float32x4_t __s0 = __p0; \
- float32x4_t __s1 = __p1; \
- float32x4_t __s2 = __p2; \
- float32x4_t __ret; \
- __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmlsq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
- float32x4_t __s0 = __p0; \
- float32x4_t __s1 = __p1; \
- float32x4_t __s2 = __p2; \
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- float32x4_t __ret; \
- __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlsq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x4_t __s2 = __p2; \
- int32x4_t __ret; \
- __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmlsq_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x4_t __s2 = __p2; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlsq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x8_t __s2 = __p2; \
- int16x8_t __ret; \
- __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmlsq_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x8_t __s2 = __p2; \
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __ret; \
- __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3, __p3, __p3, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmls_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x2_t __s1 = __p1; \
- uint32x4_t __s2 = __p2; \
- uint32x2_t __ret; \
- __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmls_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x2_t __s1 = __p1; \
- uint32x4_t __s2 = __p2; \
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- uint32x2_t __ret; \
- __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmls_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x4_t __s1 = __p1; \
- uint16x8_t __s2 = __p2; \
- uint16x4_t __ret; \
- __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmls_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x4_t __s1 = __p1; \
- uint16x8_t __s2 = __p2; \
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x4_t __ret; \
- __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmls_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
- float32x2_t __s0 = __p0; \
- float32x2_t __s1 = __p1; \
- float32x4_t __s2 = __p2; \
- float32x2_t __ret; \
- __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmls_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
- float32x2_t __s0 = __p0; \
- float32x2_t __s1 = __p1; \
- float32x4_t __s2 = __p2; \
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- float32x2_t __ret; \
- __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x4_t __s2 = __p2; \
- int32x2_t __ret; \
- __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x4_t __s2 = __p2; \
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- int32x2_t __ret; \
- __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmls_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x8_t __s2 = __p2; \
- int16x4_t __ret; \
- __ret = __s0 - __s1 * __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3); \
- __ret; \
- })
- #else
- #define vmls_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x8_t __s2 = __p2; \
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x4_t __ret; \
- __ret = __rev0 - __rev1 * __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vmlsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
- float64x2_t __ret;
- __ret = __p0 - __p1 * (float64x2_t) {__p2, __p2};
- return __ret;
- }
- #else
- __ai float64x2_t vmlsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float64x2_t __ret;
- __ret = __rev0 - __rev1 * (float64x2_t) {__p2, __p2};
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlsl_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint32x4_t __s1 = __p1; \
- uint32x2_t __s2 = __p2; \
- uint64x2_t __ret; \
- __ret = __s0 - vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vmlsl_high_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint32x4_t __s1 = __p1; \
- uint32x2_t __s2 = __p2; \
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
- uint64x2_t __ret; \
- __ret = __rev0 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlsl_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint16x8_t __s1 = __p1; \
- uint16x4_t __s2 = __p2; \
- uint32x4_t __ret; \
- __ret = __s0 - vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vmlsl_high_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint16x8_t __s1 = __p1; \
- uint16x4_t __s2 = __p2; \
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- uint32x4_t __ret; \
- __ret = __rev0 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x2_t __s2 = __p2; \
- int64x2_t __ret; \
- __ret = __s0 - vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x2_t __s2 = __p2; \
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
- int64x2_t __ret; \
- __ret = __rev0 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x4_t __s2 = __p2; \
- int32x4_t __ret; \
- __ret = __s0 - vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x4_t __s2 = __p2; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = __rev0 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlsl_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint32x4_t __s1 = __p1; \
- uint32x4_t __s2 = __p2; \
- uint64x2_t __ret; \
- __ret = __s0 - vmull_u32(vget_high_u32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vmlsl_high_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint32x4_t __s1 = __p1; \
- uint32x4_t __s2 = __p2; \
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- uint64x2_t __ret; \
- __ret = __rev0 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlsl_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint16x8_t __s1 = __p1; \
- uint16x8_t __s2 = __p2; \
- uint32x4_t __ret; \
- __ret = __s0 - vmull_u16(vget_high_u16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vmlsl_high_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint16x8_t __s1 = __p1; \
- uint16x8_t __s2 = __p2; \
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint32x4_t __ret; \
- __ret = __rev0 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x4_t __s2 = __p2; \
- int64x2_t __ret; \
- __ret = __s0 - vmull_s32(vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x4_t __s2 = __p2; \
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- int64x2_t __ret; \
- __ret = __rev0 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x8_t __s2 = __p2; \
- int32x4_t __ret; \
- __ret = __s0 - vmull_s16(vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x8_t __s2 = __p2; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = __rev0 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlsl_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint32x2_t __s1 = __p1; \
- uint32x4_t __s2 = __p2; \
- uint64x2_t __ret; \
- __ret = __s0 - vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vmlsl_laneq_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint32x2_t __s1 = __p1; \
- uint32x4_t __s2 = __p2; \
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- uint64x2_t __ret; \
- __ret = __rev0 - __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlsl_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint16x4_t __s1 = __p1; \
- uint16x8_t __s2 = __p2; \
- uint32x4_t __ret; \
- __ret = __s0 - vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vmlsl_laneq_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint16x4_t __s1 = __p1; \
- uint16x8_t __s2 = __p2; \
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint32x4_t __ret; \
- __ret = __rev0 - __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x4_t __s2 = __p2; \
- int64x2_t __ret; \
- __ret = __s0 - vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x4_t __s2 = __p2; \
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- int64x2_t __ret; \
- __ret = __rev0 - __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x8_t __s2 = __p2; \
- int32x4_t __ret; \
- __ret = __s0 - vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x8_t __s2 = __p2; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = __rev0 - __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x1_t vmov_n_p64(poly64_t __p0) {
- poly64x1_t __ret;
- __ret = (poly64x1_t) {__p0};
- return __ret;
- }
- #else
- __ai poly64x1_t vmov_n_p64(poly64_t __p0) {
- poly64x1_t __ret;
- __ret = (poly64x1_t) {__p0};
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x2_t vmovq_n_p64(poly64_t __p0) {
- poly64x2_t __ret;
- __ret = (poly64x2_t) {__p0, __p0};
- return __ret;
- }
- #else
- __ai poly64x2_t vmovq_n_p64(poly64_t __p0) {
- poly64x2_t __ret;
- __ret = (poly64x2_t) {__p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vmovq_n_f64(float64_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t) {__p0, __p0};
- return __ret;
- }
- #else
- __ai float64x2_t vmovq_n_f64(float64_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t) {__p0, __p0};
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vmov_n_f64(float64_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t) {__p0};
- return __ret;
- }
- #else
- __ai float64x1_t vmov_n_f64(float64_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t) {__p0};
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_124) {
- uint16x8_t __ret_124;
- uint8x8_t __a1_124 = vget_high_u8(__p0_124);
- __ret_124 = (uint16x8_t)(vshll_n_u8(__a1_124, 0));
- return __ret_124;
- }
- #else
- __ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_125) {
- uint8x16_t __rev0_125; __rev0_125 = __builtin_shufflevector(__p0_125, __p0_125, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret_125;
- uint8x8_t __a1_125 = __noswap_vget_high_u8(__rev0_125);
- __ret_125 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_125, 0));
- __ret_125 = __builtin_shufflevector(__ret_125, __ret_125, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret_125;
- }
- __ai uint16x8_t __noswap_vmovl_high_u8(uint8x16_t __p0_126) {
- uint16x8_t __ret_126;
- uint8x8_t __a1_126 = __noswap_vget_high_u8(__p0_126);
- __ret_126 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_126, 0));
- return __ret_126;
- }
- #endif
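A usage sketch for vmovl_high_u8 as defined above: lanes 8..15 of the input are zero-extended to 16 bits, which the implementation expresses as a shift-left-long by 0 on the high half. Helper name is illustrative, assuming an AArch64 target.

static inline uint16x8_t widen_top_bytes(uint8x16_t v) {
  /* equivalent to vshll_n_u8(vget_high_u8(v), 0) per the body above */
  return vmovl_high_u8(v);
}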
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_127) {
- uint64x2_t __ret_127;
- uint32x2_t __a1_127 = vget_high_u32(__p0_127);
- __ret_127 = (uint64x2_t)(vshll_n_u32(__a1_127, 0));
- return __ret_127;
- }
- #else
- __ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_128) {
- uint32x4_t __rev0_128; __rev0_128 = __builtin_shufflevector(__p0_128, __p0_128, 3, 2, 1, 0);
- uint64x2_t __ret_128;
- uint32x2_t __a1_128 = __noswap_vget_high_u32(__rev0_128);
- __ret_128 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_128, 0));
- __ret_128 = __builtin_shufflevector(__ret_128, __ret_128, 1, 0);
- return __ret_128;
- }
- __ai uint64x2_t __noswap_vmovl_high_u32(uint32x4_t __p0_129) {
- uint64x2_t __ret_129;
- uint32x2_t __a1_129 = __noswap_vget_high_u32(__p0_129);
- __ret_129 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_129, 0));
- return __ret_129;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_130) {
- uint32x4_t __ret_130;
- uint16x4_t __a1_130 = vget_high_u16(__p0_130);
- __ret_130 = (uint32x4_t)(vshll_n_u16(__a1_130, 0));
- return __ret_130;
- }
- #else
- __ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_131) {
- uint16x8_t __rev0_131; __rev0_131 = __builtin_shufflevector(__p0_131, __p0_131, 7, 6, 5, 4, 3, 2, 1, 0);
- uint32x4_t __ret_131;
- uint16x4_t __a1_131 = __noswap_vget_high_u16(__rev0_131);
- __ret_131 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_131, 0));
- __ret_131 = __builtin_shufflevector(__ret_131, __ret_131, 3, 2, 1, 0);
- return __ret_131;
- }
- __ai uint32x4_t __noswap_vmovl_high_u16(uint16x8_t __p0_132) {
- uint32x4_t __ret_132;
- uint16x4_t __a1_132 = __noswap_vget_high_u16(__p0_132);
- __ret_132 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_132, 0));
- return __ret_132;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vmovl_high_s8(int8x16_t __p0_133) {
- int16x8_t __ret_133;
- int8x8_t __a1_133 = vget_high_s8(__p0_133);
- __ret_133 = (int16x8_t)(vshll_n_s8(__a1_133, 0));
- return __ret_133;
- }
- #else
- __ai int16x8_t vmovl_high_s8(int8x16_t __p0_134) {
- int8x16_t __rev0_134; __rev0_134 = __builtin_shufflevector(__p0_134, __p0_134, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret_134;
- int8x8_t __a1_134 = __noswap_vget_high_s8(__rev0_134);
- __ret_134 = (int16x8_t)(__noswap_vshll_n_s8(__a1_134, 0));
- __ret_134 = __builtin_shufflevector(__ret_134, __ret_134, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret_134;
- }
- __ai int16x8_t __noswap_vmovl_high_s8(int8x16_t __p0_135) {
- int16x8_t __ret_135;
- int8x8_t __a1_135 = __noswap_vget_high_s8(__p0_135);
- __ret_135 = (int16x8_t)(__noswap_vshll_n_s8(__a1_135, 0));
- return __ret_135;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vmovl_high_s32(int32x4_t __p0_136) {
- int64x2_t __ret_136;
- int32x2_t __a1_136 = vget_high_s32(__p0_136);
- __ret_136 = (int64x2_t)(vshll_n_s32(__a1_136, 0));
- return __ret_136;
- }
- #else
- __ai int64x2_t vmovl_high_s32(int32x4_t __p0_137) {
- int32x4_t __rev0_137; __rev0_137 = __builtin_shufflevector(__p0_137, __p0_137, 3, 2, 1, 0);
- int64x2_t __ret_137;
- int32x2_t __a1_137 = __noswap_vget_high_s32(__rev0_137);
- __ret_137 = (int64x2_t)(__noswap_vshll_n_s32(__a1_137, 0));
- __ret_137 = __builtin_shufflevector(__ret_137, __ret_137, 1, 0);
- return __ret_137;
- }
- __ai int64x2_t __noswap_vmovl_high_s32(int32x4_t __p0_138) {
- int64x2_t __ret_138;
- int32x2_t __a1_138 = __noswap_vget_high_s32(__p0_138);
- __ret_138 = (int64x2_t)(__noswap_vshll_n_s32(__a1_138, 0));
- return __ret_138;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vmovl_high_s16(int16x8_t __p0_139) {
- int32x4_t __ret_139;
- int16x4_t __a1_139 = vget_high_s16(__p0_139);
- __ret_139 = (int32x4_t)(vshll_n_s16(__a1_139, 0));
- return __ret_139;
- }
- #else
- __ai int32x4_t vmovl_high_s16(int16x8_t __p0_140) {
- int16x8_t __rev0_140; __rev0_140 = __builtin_shufflevector(__p0_140, __p0_140, 7, 6, 5, 4, 3, 2, 1, 0);
- int32x4_t __ret_140;
- int16x4_t __a1_140 = __noswap_vget_high_s16(__rev0_140);
- __ret_140 = (int32x4_t)(__noswap_vshll_n_s16(__a1_140, 0));
- __ret_140 = __builtin_shufflevector(__ret_140, __ret_140, 3, 2, 1, 0);
- return __ret_140;
- }
- __ai int32x4_t __noswap_vmovl_high_s16(int16x8_t __p0_141) {
- int32x4_t __ret_141;
- int16x4_t __a1_141 = __noswap_vget_high_s16(__p0_141);
- __ret_141 = (int32x4_t)(__noswap_vshll_n_s16(__a1_141, 0));
- return __ret_141;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
- uint16x8_t __ret;
- __ret = vcombine_u16(__p0, vmovn_u32(__p1));
- return __ret;
- }
- #else
- __ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __noswap_vcombine_u16(__rev0, __noswap_vmovn_u32(__rev1));
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
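A usage sketch for vmovn_high_u32 as defined above: the truncated 16-bit halves of the second operand land in the upper half of the result, with the first operand kept in the lower half. Helper name is illustrative, assuming an AArch64 target.

static inline uint16x8_t narrow_into_high(uint16x4_t lo, uint32x4_t wide) {
  /* same as vcombine_u16(lo, vmovn_u32(wide)) per the definition above */
  return vmovn_high_u32(lo, wide);
}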
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
- uint32x4_t __ret;
- __ret = vcombine_u32(__p0, vmovn_u64(__p1));
- return __ret;
- }
- #else
- __ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x4_t __ret;
- __ret = __noswap_vcombine_u32(__rev0, __noswap_vmovn_u64(__rev1));
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
- uint8x16_t __ret;
- __ret = vcombine_u8(__p0, vmovn_u16(__p1));
- return __ret;
- }
- #else
- __ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = __noswap_vcombine_u8(__rev0, __noswap_vmovn_u16(__rev1));
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
- int16x8_t __ret;
- __ret = vcombine_s16(__p0, vmovn_s32(__p1));
- return __ret;
- }
- #else
- __ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __noswap_vcombine_s16(__rev0, __noswap_vmovn_s32(__rev1));
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
- int32x4_t __ret;
- __ret = vcombine_s32(__p0, vmovn_s64(__p1));
- return __ret;
- }
- #else
- __ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x4_t __ret;
- __ret = __noswap_vcombine_s32(__rev0, __noswap_vmovn_s64(__rev1));
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
- int8x16_t __ret;
- __ret = vcombine_s8(__p0, vmovn_s16(__p1));
- return __ret;
- }
- #else
- __ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = __noswap_vcombine_s8(__rev0, __noswap_vmovn_s16(__rev1));
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __ret;
- __ret = __p0 * __p1;
- return __ret;
- }
- #else
- __ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float64x2_t __ret;
- __ret = __rev0 * __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vmul_f64(float64x1_t __p0, float64x1_t __p1) {
- float64x1_t __ret;
- __ret = __p0 * __p1;
- return __ret;
- }
- #else
- __ai float64x1_t vmul_f64(float64x1_t __p0, float64x1_t __p1) {
- float64x1_t __ret;
- __ret = __p0 * __p1;
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmuld_lane_f64(__p0_142, __p1_142, __p2_142) __extension__ ({ \
- float64_t __s0_142 = __p0_142; \
- float64x1_t __s1_142 = __p1_142; \
- float64_t __ret_142; \
- __ret_142 = __s0_142 * vget_lane_f64(__s1_142, __p2_142); \
- __ret_142; \
- })
- #else
- #define vmuld_lane_f64(__p0_143, __p1_143, __p2_143) __extension__ ({ \
- float64_t __s0_143 = __p0_143; \
- float64x1_t __s1_143 = __p1_143; \
- float64_t __ret_143; \
- __ret_143 = __s0_143 * __noswap_vget_lane_f64(__s1_143, __p2_143); \
- __ret_143; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmuls_lane_f32(__p0_144, __p1_144, __p2_144) __extension__ ({ \
- float32_t __s0_144 = __p0_144; \
- float32x2_t __s1_144 = __p1_144; \
- float32_t __ret_144; \
- __ret_144 = __s0_144 * vget_lane_f32(__s1_144, __p2_144); \
- __ret_144; \
- })
- #else
- #define vmuls_lane_f32(__p0_145, __p1_145, __p2_145) __extension__ ({ \
- float32_t __s0_145 = __p0_145; \
- float32x2_t __s1_145 = __p1_145; \
- float32x2_t __rev1_145; __rev1_145 = __builtin_shufflevector(__s1_145, __s1_145, 1, 0); \
- float32_t __ret_145; \
- __ret_145 = __s0_145 * __noswap_vget_lane_f32(__rev1_145, __p2_145); \
- __ret_145; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmul_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x1_t __s0 = __p0; \
- float64x1_t __s1 = __p1; \
- float64x1_t __ret; \
- __ret = (float64x1_t) __builtin_neon_vmul_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \
- __ret; \
- })
- #else
- #define vmul_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x1_t __s0 = __p0; \
- float64x1_t __s1 = __p1; \
- float64x1_t __ret; \
- __ret = (float64x1_t) __builtin_neon_vmul_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmulq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x2_t __s0 = __p0; \
- float64x1_t __s1 = __p1; \
- float64x2_t __ret; \
- __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
- __ret; \
- })
- #else
- #define vmulq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x2_t __s0 = __p0; \
- float64x1_t __s1 = __p1; \
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- float64x2_t __ret; \
- __ret = __rev0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmuld_laneq_f64(__p0_146, __p1_146, __p2_146) __extension__ ({ \
- float64_t __s0_146 = __p0_146; \
- float64x2_t __s1_146 = __p1_146; \
- float64_t __ret_146; \
- __ret_146 = __s0_146 * vgetq_lane_f64(__s1_146, __p2_146); \
- __ret_146; \
- })
- #else
- #define vmuld_laneq_f64(__p0_147, __p1_147, __p2_147) __extension__ ({ \
- float64_t __s0_147 = __p0_147; \
- float64x2_t __s1_147 = __p1_147; \
- float64x2_t __rev1_147; __rev1_147 = __builtin_shufflevector(__s1_147, __s1_147, 1, 0); \
- float64_t __ret_147; \
- __ret_147 = __s0_147 * __noswap_vgetq_lane_f64(__rev1_147, __p2_147); \
- __ret_147; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmuls_laneq_f32(__p0_148, __p1_148, __p2_148) __extension__ ({ \
- float32_t __s0_148 = __p0_148; \
- float32x4_t __s1_148 = __p1_148; \
- float32_t __ret_148; \
- __ret_148 = __s0_148 * vgetq_lane_f32(__s1_148, __p2_148); \
- __ret_148; \
- })
- #else
- #define vmuls_laneq_f32(__p0_149, __p1_149, __p2_149) __extension__ ({ \
- float32_t __s0_149 = __p0_149; \
- float32x4_t __s1_149 = __p1_149; \
- float32x4_t __rev1_149; __rev1_149 = __builtin_shufflevector(__s1_149, __s1_149, 3, 2, 1, 0); \
- float32_t __ret_149; \
- __ret_149 = __s0_149 * __noswap_vgetq_lane_f32(__rev1_149, __p2_149); \
- __ret_149; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x1_t __s0 = __p0; \
- float64x2_t __s1 = __p1; \
- float64x1_t __ret; \
- __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 10); \
- __ret; \
- })
- #else
- #define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x1_t __s0 = __p0; \
- float64x2_t __s1 = __p1; \
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- float64x1_t __ret; \
- __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__rev1, __p2, 10); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmulq_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __s1 = __p1; \
- uint32x4_t __ret; \
- __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
- __ret; \
- })
- #else
- #define vmulq_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __s1 = __p1; \
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- uint32x4_t __ret; \
- __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmulq_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __s1 = __p1; \
- uint16x8_t __ret; \
- __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
- __ret; \
- })
- #else
- #define vmulq_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __s1 = __p1; \
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __ret; \
- __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmulq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x2_t __s0 = __p0; \
- float64x2_t __s1 = __p1; \
- float64x2_t __ret; \
- __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
- __ret; \
- })
- #else
- #define vmulq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x2_t __s0 = __p0; \
- float64x2_t __s1 = __p1; \
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- float64x2_t __ret; \
- __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmulq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x4_t __s0 = __p0; \
- float32x4_t __s1 = __p1; \
- float32x4_t __ret; \
- __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
- __ret; \
- })
- #else
- #define vmulq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x4_t __s0 = __p0; \
- float32x4_t __s1 = __p1; \
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- float32x4_t __ret; \
- __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmulq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x4_t __ret; \
- __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
- __ret; \
- })
- #else
- #define vmulq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmulq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x8_t __ret; \
- __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
- __ret; \
- })
- #else
- #define vmulq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __ret; \
- __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmul_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x4_t __s1 = __p1; \
- uint32x2_t __ret; \
- __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
- __ret; \
- })
- #else
- #define vmul_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x4_t __s1 = __p1; \
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- uint32x2_t __ret; \
- __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmul_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x8_t __s1 = __p1; \
- uint16x4_t __ret; \
- __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
- __ret; \
- })
- #else
- #define vmul_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x8_t __s1 = __p1; \
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x4_t __ret; \
- __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmul_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x2_t __s0 = __p0; \
- float32x4_t __s1 = __p1; \
- float32x2_t __ret; \
- __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
- __ret; \
- })
- #else
- #define vmul_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x2_t __s0 = __p0; \
- float32x4_t __s1 = __p1; \
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- float32x2_t __ret; \
- __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmul_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x2_t __ret; \
- __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2); \
- __ret; \
- })
- #else
- #define vmul_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int32x2_t __ret; \
- __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmul_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x4_t __ret; \
- __ret = __s0 * __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2); \
- __ret; \
- })
- #else
- #define vmul_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x4_t __ret; \
- __ret = __rev0 * __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
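- /* Note: the vmul*_lane / vmul*_laneq forms above multiply every element of the
-    first vector by one element of the second vector, selected by the constant
-    lane index; the big-endian variants reverse element order before and after
-    the operation so the lane index keeps its architectural meaning.
-    Usage sketch (assumed caller code, not part of this header):
-      float32x4_t r = vmulq_laneq_f32(a, b, 3);   -- every lane of a times b[3] */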
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vmul_n_f64(float64x1_t __p0, float64_t __p1) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vmul_n_f64((int8x8_t)__p0, __p1);
- return __ret;
- }
- #else
- __ai float64x1_t vmul_n_f64(float64x1_t __p0, float64_t __p1) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vmul_n_f64((int8x8_t)__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) {
- float64x2_t __ret;
- __ret = __p0 * (float64x2_t) {__p1, __p1};
- return __ret;
- }
- #else
- __ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __ret;
- __ret = __rev0 * (float64x2_t) {__p1, __p1};
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
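- /* Note: vmul_n_f64 / vmulq_n_f64 multiply by a scalar broadcast to every lane,
-    e.g. (assumed caller code) float64x2_t r = vmulq_n_f64(v, 2.0);
-    scales both lanes of v by 2.0. */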
- #ifdef __LITTLE_ENDIAN__
- __ai poly128_t vmull_p64(poly64_t __p0, poly64_t __p1) {
- poly128_t __ret;
- __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1);
- return __ret;
- }
- #else
- __ai poly128_t vmull_p64(poly64_t __p0, poly64_t __p1) {
- poly128_t __ret;
- __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1);
- return __ret;
- }
- __ai poly128_t __noswap_vmull_p64(poly64_t __p0, poly64_t __p1) {
- poly128_t __ret;
- __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1);
- return __ret;
- }
- #endif
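- /* Note: vmull_p64 is the 64x64 -> 128-bit polynomial (carry-less) multiply,
-    lowered to the AArch64 PMULL instruction; it is typically used in CRC and
-    GHASH/AES-GCM style computations. */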
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) {
- poly16x8_t __ret;
- __ret = vmull_p8(vget_high_p8(__p0), vget_high_p8(__p1));
- return __ret;
- }
- #else
- __ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) {
- poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly16x8_t __ret;
- __ret = __noswap_vmull_p8(__noswap_vget_high_p8(__rev0), __noswap_vget_high_p8(__rev1));
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint16x8_t __ret;
- __ret = vmull_u8(vget_high_u8(__p0), vget_high_u8(__p1));
- return __ret;
- }
- #else
- __ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __noswap_vmull_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1));
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint64x2_t __ret;
- __ret = vmull_u32(vget_high_u32(__p0), vget_high_u32(__p1));
- return __ret;
- }
- #else
- __ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint64x2_t __ret;
- __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1));
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint32x4_t __ret;
- __ret = vmull_u16(vget_high_u16(__p0), vget_high_u16(__p1));
- return __ret;
- }
- #else
- __ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1));
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) {
- int16x8_t __ret;
- __ret = vmull_s8(vget_high_s8(__p0), vget_high_s8(__p1));
- return __ret;
- }
- #else
- __ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __noswap_vmull_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1));
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
- int64x2_t __ret;
- __ret = vmull_s32(vget_high_s32(__p0), vget_high_s32(__p1));
- return __ret;
- }
- #else
- __ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int64x2_t __ret;
- __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
- int32x4_t __ret;
- __ret = vmull_s16(vget_high_s16(__p0), vget_high_s16(__p1));
- return __ret;
- }
- #else
- __ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) {
- poly128_t __ret;
- __ret = vmull_p64((poly64_t)(vget_high_p64(__p0)), (poly64_t)(vget_high_p64(__p1)));
- return __ret;
- }
- #else
- __ai poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) {
- poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- poly128_t __ret;
- __ret = __noswap_vmull_p64((poly64_t)(__noswap_vget_high_p64(__rev0)), (poly64_t)(__noswap_vget_high_p64(__rev1)));
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmull_high_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x2_t __s1 = __p1; \
- uint64x2_t __ret; \
- __ret = vmull_u32(vget_high_u32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vmull_high_lane_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x2_t __s1 = __p1; \
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- uint64x2_t __ret; \
- __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmull_high_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x4_t __s1 = __p1; \
- uint32x4_t __ret; \
- __ret = vmull_u16(vget_high_u16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vmull_high_lane_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x4_t __s1 = __p1; \
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- uint32x4_t __ret; \
- __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int64x2_t __ret; \
- __ret = vmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int64x2_t __ret; \
- __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int32x4_t __ret; \
- __ret = vmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmull_high_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __s1 = __p1; \
- uint64x2_t __ret; \
- __ret = vmull_u32(vget_high_u32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vmull_high_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint32x4_t __s1 = __p1; \
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- uint64x2_t __ret; \
- __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmull_high_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __s1 = __p1; \
- uint32x4_t __ret; \
- __ret = vmull_u16(vget_high_u16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vmull_high_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x8_t __s0 = __p0; \
- uint16x8_t __s1 = __p1; \
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint32x4_t __ret; \
- __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int64x2_t __ret; \
- __ret = vmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int64x2_t __ret; \
- __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int32x4_t __ret; \
- __ret = vmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) {
- uint64x2_t __ret;
- __ret = vmull_n_u32(vget_high_u32(__p0), __p1);
- return __ret;
- }
- #else
- __ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint64x2_t __ret;
- __ret = __noswap_vmull_n_u32(__noswap_vget_high_u32(__rev0), __p1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) {
- uint32x4_t __ret;
- __ret = vmull_n_u16(vget_high_u16(__p0), __p1);
- return __ret;
- }
- #else
- __ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __noswap_vmull_n_u16(__noswap_vget_high_u16(__rev0), __p1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
- int64x2_t __ret;
- __ret = vmull_n_s32(vget_high_s32(__p0), __p1);
- return __ret;
- }
- #else
- __ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int64x2_t __ret;
- __ret = __noswap_vmull_n_s32(__noswap_vget_high_s32(__rev0), __p1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
- int32x4_t __ret;
- __ret = vmull_n_s16(vget_high_s16(__p0), __p1);
- return __ret;
- }
- #else
- __ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __noswap_vmull_n_s16(__noswap_vget_high_s16(__rev0), __p1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
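- /* Note: the vmull_high_* forms above widen-multiply only the upper halves of
-    their 128-bit inputs, i.e. they are equivalent to
-    vmull_xx(vget_high_xx(a), vget_high_xx(b)); the _lane/_laneq/_n variants
-    additionally take one broadcast lane or a scalar as the second operand. */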
- #ifdef __LITTLE_ENDIAN__
- #define vmull_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x4_t __s1 = __p1; \
- uint64x2_t __ret; \
- __ret = vmull_u32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vmull_laneq_u32(__p0, __p1, __p2) __extension__ ({ \
- uint32x2_t __s0 = __p0; \
- uint32x4_t __s1 = __p1; \
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- uint64x2_t __ret; \
- __ret = __noswap_vmull_u32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmull_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x8_t __s1 = __p1; \
- uint32x4_t __ret; \
- __ret = vmull_u16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vmull_laneq_u16(__p0, __p1, __p2) __extension__ ({ \
- uint16x4_t __s0 = __p0; \
- uint16x8_t __s1 = __p1; \
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint32x4_t __ret; \
- __ret = __noswap_vmull_u16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int64x2_t __ret; \
- __ret = vmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int64x2_t __ret; \
- __ret = __noswap_vmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int32x4_t __ret; \
- __ret = vmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = __noswap_vmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
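- /* Note: vmull_laneq_* widen-multiply a 64-bit vector by one lane of a 128-bit
-    ("q") vector, e.g. (assumed caller code, with int32x2_t lo and int32x4_t q)
-      int64x2_t r = vmull_laneq_s32(lo, q, 2);   -- lo times q[2], widened to 64-bit lanes */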
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
- return __ret;
- }
- #else
- __ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai float64x2_t __noswap_vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
- return __ret;
- }
- #else
- __ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai float32x4_t __noswap_vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vmulx_f64(float64x1_t __p0, float64x1_t __p1) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
- return __ret;
- }
- #else
- __ai float64x1_t vmulx_f64(float64x1_t __p0, float64x1_t __p1) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
- return __ret;
- }
- #else
- __ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai float32x2_t __noswap_vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64_t vmulxd_f64(float64_t __p0, float64_t __p1) {
- float64_t __ret;
- __ret = (float64_t) __builtin_neon_vmulxd_f64(__p0, __p1);
- return __ret;
- }
- #else
- __ai float64_t vmulxd_f64(float64_t __p0, float64_t __p1) {
- float64_t __ret;
- __ret = (float64_t) __builtin_neon_vmulxd_f64(__p0, __p1);
- return __ret;
- }
- __ai float64_t __noswap_vmulxd_f64(float64_t __p0, float64_t __p1) {
- float64_t __ret;
- __ret = (float64_t) __builtin_neon_vmulxd_f64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32_t vmulxs_f32(float32_t __p0, float32_t __p1) {
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1);
- return __ret;
- }
- #else
- __ai float32_t vmulxs_f32(float32_t __p0, float32_t __p1) {
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1);
- return __ret;
- }
- __ai float32_t __noswap_vmulxs_f32(float32_t __p0, float32_t __p1) {
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmulxd_lane_f64(__p0_150, __p1_150, __p2_150) __extension__ ({ \
- float64_t __s0_150 = __p0_150; \
- float64x1_t __s1_150 = __p1_150; \
- float64_t __ret_150; \
- __ret_150 = vmulxd_f64(__s0_150, vget_lane_f64(__s1_150, __p2_150)); \
- __ret_150; \
- })
- #else
- #define vmulxd_lane_f64(__p0_151, __p1_151, __p2_151) __extension__ ({ \
- float64_t __s0_151 = __p0_151; \
- float64x1_t __s1_151 = __p1_151; \
- float64_t __ret_151; \
- __ret_151 = __noswap_vmulxd_f64(__s0_151, __noswap_vget_lane_f64(__s1_151, __p2_151)); \
- __ret_151; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmulxs_lane_f32(__p0_152, __p1_152, __p2_152) __extension__ ({ \
- float32_t __s0_152 = __p0_152; \
- float32x2_t __s1_152 = __p1_152; \
- float32_t __ret_152; \
- __ret_152 = vmulxs_f32(__s0_152, vget_lane_f32(__s1_152, __p2_152)); \
- __ret_152; \
- })
- #else
- #define vmulxs_lane_f32(__p0_153, __p1_153, __p2_153) __extension__ ({ \
- float32_t __s0_153 = __p0_153; \
- float32x2_t __s1_153 = __p1_153; \
- float32x2_t __rev1_153; __rev1_153 = __builtin_shufflevector(__s1_153, __s1_153, 1, 0); \
- float32_t __ret_153; \
- __ret_153 = __noswap_vmulxs_f32(__s0_153, __noswap_vget_lane_f32(__rev1_153, __p2_153)); \
- __ret_153; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmulxq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x2_t __s0 = __p0; \
- float64x1_t __s1 = __p1; \
- float64x2_t __ret; \
- __ret = vmulxq_f64(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vmulxq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x2_t __s0 = __p0; \
- float64x1_t __s1 = __p1; \
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- float64x2_t __ret; \
- __ret = __noswap_vmulxq_f64(__rev0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmulxq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x4_t __s0 = __p0; \
- float32x2_t __s1 = __p1; \
- float32x4_t __ret; \
- __ret = vmulxq_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vmulxq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x4_t __s0 = __p0; \
- float32x2_t __s1 = __p1; \
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- float32x4_t __ret; \
- __ret = __noswap_vmulxq_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmulx_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x2_t __s0 = __p0; \
- float32x2_t __s1 = __p1; \
- float32x2_t __ret; \
- __ret = vmulx_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vmulx_lane_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x2_t __s0 = __p0; \
- float32x2_t __s1 = __p1; \
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- float32x2_t __ret; \
- __ret = __noswap_vmulx_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmulxd_laneq_f64(__p0_154, __p1_154, __p2_154) __extension__ ({ \
- float64_t __s0_154 = __p0_154; \
- float64x2_t __s1_154 = __p1_154; \
- float64_t __ret_154; \
- __ret_154 = vmulxd_f64(__s0_154, vgetq_lane_f64(__s1_154, __p2_154)); \
- __ret_154; \
- })
- #else
- #define vmulxd_laneq_f64(__p0_155, __p1_155, __p2_155) __extension__ ({ \
- float64_t __s0_155 = __p0_155; \
- float64x2_t __s1_155 = __p1_155; \
- float64x2_t __rev1_155; __rev1_155 = __builtin_shufflevector(__s1_155, __s1_155, 1, 0); \
- float64_t __ret_155; \
- __ret_155 = __noswap_vmulxd_f64(__s0_155, __noswap_vgetq_lane_f64(__rev1_155, __p2_155)); \
- __ret_155; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmulxs_laneq_f32(__p0_156, __p1_156, __p2_156) __extension__ ({ \
- float32_t __s0_156 = __p0_156; \
- float32x4_t __s1_156 = __p1_156; \
- float32_t __ret_156; \
- __ret_156 = vmulxs_f32(__s0_156, vgetq_lane_f32(__s1_156, __p2_156)); \
- __ret_156; \
- })
- #else
- #define vmulxs_laneq_f32(__p0_157, __p1_157, __p2_157) __extension__ ({ \
- float32_t __s0_157 = __p0_157; \
- float32x4_t __s1_157 = __p1_157; \
- float32x4_t __rev1_157; __rev1_157 = __builtin_shufflevector(__s1_157, __s1_157, 3, 2, 1, 0); \
- float32_t __ret_157; \
- __ret_157 = __noswap_vmulxs_f32(__s0_157, __noswap_vgetq_lane_f32(__rev1_157, __p2_157)); \
- __ret_157; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmulxq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x2_t __s0 = __p0; \
- float64x2_t __s1 = __p1; \
- float64x2_t __ret; \
- __ret = vmulxq_f64(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vmulxq_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x2_t __s0 = __p0; \
- float64x2_t __s1 = __p1; \
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- float64x2_t __ret; \
- __ret = __noswap_vmulxq_f64(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmulxq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x4_t __s0 = __p0; \
- float32x4_t __s1 = __p1; \
- float32x4_t __ret; \
- __ret = vmulxq_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vmulxq_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x4_t __s0 = __p0; \
- float32x4_t __s1 = __p1; \
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- float32x4_t __ret; \
- __ret = __noswap_vmulxq_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmulx_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x2_t __s0 = __p0; \
- float32x4_t __s1 = __p1; \
- float32x2_t __ret; \
- __ret = vmulx_f32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vmulx_laneq_f32(__p0, __p1, __p2) __extension__ ({ \
- float32x2_t __s0 = __p0; \
- float32x4_t __s1 = __p1; \
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- float32x2_t __ret; \
- __ret = __noswap_vmulx_f32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
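- /* Note: the vmulx* intrinsics map to the AArch64 FMULX instruction, which acts
-    like an ordinary floating-point multiply except that (+/-)0 * (+/-)infinity
-    returns (+/-)2.0 rather than NaN, a property relied on by reciprocal and
-    reciprocal-step style sequences. */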
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vnegq_f64(float64x2_t __p0) {
- float64x2_t __ret;
- __ret = -__p0;
- return __ret;
- }
- #else
- __ai float64x2_t vnegq_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __ret;
- __ret = -__rev0;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vnegq_s64(int64x2_t __p0) {
- int64x2_t __ret;
- __ret = -__p0;
- return __ret;
- }
- #else
- __ai int64x2_t vnegq_s64(int64x2_t __p0) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __ret;
- __ret = -__rev0;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vneg_f64(float64x1_t __p0) {
- float64x1_t __ret;
- __ret = -__p0;
- return __ret;
- }
- #else
- __ai float64x1_t vneg_f64(float64x1_t __p0) {
- float64x1_t __ret;
- __ret = -__p0;
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vneg_s64(int64x1_t __p0) {
- int64x1_t __ret;
- __ret = -__p0;
- return __ret;
- }
- #else
- __ai int64x1_t vneg_s64(int64x1_t __p0) {
- int64x1_t __ret;
- __ret = -__p0;
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64_t vnegd_s64(int64_t __p0) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vnegd_s64(__p0);
- return __ret;
- }
- #else
- __ai int64_t vnegd_s64(int64_t __p0) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vnegd_s64(__p0);
- return __ret;
- }
- #endif
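- /* Note: the vneg* forms above negate every element; vnegd_s64 is the scalar
-    64-bit variant and is lowered through __builtin_neon_vnegd_s64. */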
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
- return __ret;
- }
- #else
- __ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
- return __ret;
- }
- #else
- __ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
- return __ret;
- }
- #else
- __ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
- return __ret;
- }
- #else
- __ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
- return __ret;
- }
- #else
- __ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
- return __ret;
- }
- #else
- __ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64_t vpaddd_u64(uint64x2_t __p0) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vpaddd_u64((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai uint64_t vpaddd_u64(uint64x2_t __p0) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vpaddd_u64((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64_t vpaddd_f64(float64x2_t __p0) {
- float64_t __ret;
- __ret = (float64_t) __builtin_neon_vpaddd_f64((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai float64_t vpaddd_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64_t __ret;
- __ret = (float64_t) __builtin_neon_vpaddd_f64((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64_t vpaddd_s64(int64x2_t __p0) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vpaddd_s64((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai int64_t vpaddd_s64(int64x2_t __p0) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vpaddd_s64((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32_t vpadds_f32(float32x2_t __p0) {
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vpadds_f32((int8x8_t)__p0);
- return __ret;
- }
- #else
- __ai float32_t vpadds_f32(float32x2_t __p0) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vpadds_f32((int8x8_t)__rev0);
- return __ret;
- }
- #endif
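- /* Note: vpaddq_* add adjacent pairs of elements drawn from both inputs, while
-    vpaddd_u64 / vpaddd_f64 / vpaddd_s64 / vpadds_f32 reduce a single
-    two-element vector to the scalar sum of its lanes, e.g. (assumed caller code)
-      float64_t s = vpaddd_f64(v);   -- v[0] + v[1] */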
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
- return __ret;
- }
- #else
- __ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
- return __ret;
- }
- #else
- __ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
- return __ret;
- }
- #else
- __ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
- return __ret;
- }
- #else
- __ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64_t vpmaxqd_f64(float64x2_t __p0) {
- float64_t __ret;
- __ret = (float64_t) __builtin_neon_vpmaxqd_f64((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai float64_t vpmaxqd_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64_t __ret;
- __ret = (float64_t) __builtin_neon_vpmaxqd_f64((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32_t vpmaxs_f32(float32x2_t __p0) {
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vpmaxs_f32((int8x8_t)__p0);
- return __ret;
- }
- #else
- __ai float32_t vpmaxs_f32(float32x2_t __p0) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vpmaxs_f32((int8x8_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
- return __ret;
- }
- #else
- __ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
- return __ret;
- }
- #else
- __ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
- return __ret;
- }
- #else
- __ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64_t vpmaxnmqd_f64(float64x2_t __p0) {
- float64_t __ret;
- __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai float64_t vpmaxnmqd_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64_t __ret;
- __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32_t vpmaxnms_f32(float32x2_t __p0) {
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vpmaxnms_f32((int8x8_t)__p0);
- return __ret;
- }
- #else
- __ai float32_t vpmaxnms_f32(float32x2_t __p0) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vpmaxnms_f32((int8x8_t)__rev0);
- return __ret;
- }
- #endif
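- /* Note: vpmax* take the pairwise maximum of adjacent elements (vpmaxqd_f64 and
-    vpmaxs_f32 reduce a two-element vector to its larger lane); the vpmaxnm*
-    variants follow the IEEE maxNum rule, so a pair containing one quiet NaN
-    yields the numeric operand instead of NaN. */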
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
- return __ret;
- }
- #else
- __ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
- return __ret;
- }
- #else
- __ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
- return __ret;
- }
- #else
- __ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
- return __ret;
- }
- #else
- __ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64_t vpminqd_f64(float64x2_t __p0) {
- float64_t __ret;
- __ret = (float64_t) __builtin_neon_vpminqd_f64((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai float64_t vpminqd_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64_t __ret;
- __ret = (float64_t) __builtin_neon_vpminqd_f64((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32_t vpmins_f32(float32x2_t __p0) {
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vpmins_f32((int8x8_t)__p0);
- return __ret;
- }
- #else
- __ai float32_t vpmins_f32(float32x2_t __p0) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vpmins_f32((int8x8_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
- return __ret;
- }
- #else
- __ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
- return __ret;
- }
- #else
- __ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
- return __ret;
- }
- #else
- __ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64_t vpminnmqd_f64(float64x2_t __p0) {
- float64_t __ret;
- __ret = (float64_t) __builtin_neon_vpminnmqd_f64((int8x16_t)__p0);
- return __ret;
- }
- #else
- __ai float64_t vpminnmqd_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64_t __ret;
- __ret = (float64_t) __builtin_neon_vpminnmqd_f64((int8x16_t)__rev0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32_t vpminnms_f32(float32x2_t __p0) {
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vpminnms_f32((int8x8_t)__p0);
- return __ret;
- }
- #else
- __ai float32_t vpminnms_f32(float32x2_t __p0) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vpminnms_f32((int8x8_t)__rev0);
- return __ret;
- }
- #endif
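/* Illustrative usage sketch (editorial addition, not part of the generated
 * header): a horizontal minimum over a float32x4_t built from the pairwise
 * intrinsics defined above. Assumes an AArch64 target; the helper name is
 * arbitrary. */
#include <arm_neon.h>

static inline float min_of_float32x4(float32x4_t v) {
  /* vpminq_f32 reduces neighbouring pairs: {a,b,c,d} -> {min(a,b), min(c,d), min(a,b), min(c,d)} */
  float32x4_t m = vpminq_f32(v, v);
  /* one more pairwise step on the low half finishes the reduction */
  return vpmins_f32(vget_low_f32(m));
}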
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vqabsq_s64(int64x2_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 35);
- return __ret;
- }
- #else
- __ai int64x2_t vqabsq_s64(int64x2_t __p0) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 35);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vqabs_s64(int64x1_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 3);
- return __ret;
- }
- #else
- __ai int64x1_t vqabs_s64(int64x1_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 3);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8_t vqabsb_s8(int8_t __p0) {
- int8_t __ret;
- __ret = (int8_t) __builtin_neon_vqabsb_s8(__p0);
- return __ret;
- }
- #else
- __ai int8_t vqabsb_s8(int8_t __p0) {
- int8_t __ret;
- __ret = (int8_t) __builtin_neon_vqabsb_s8(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32_t vqabss_s32(int32_t __p0) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vqabss_s32(__p0);
- return __ret;
- }
- #else
- __ai int32_t vqabss_s32(int32_t __p0) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vqabss_s32(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64_t vqabsd_s64(int64_t __p0) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vqabsd_s64(__p0);
- return __ret;
- }
- #else
- __ai int64_t vqabsd_s64(int64_t __p0) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vqabsd_s64(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16_t vqabsh_s16(int16_t __p0) {
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vqabsh_s16(__p0);
- return __ret;
- }
- #else
- __ai int16_t vqabsh_s16(int16_t __p0) {
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vqabsh_s16(__p0);
- return __ret;
- }
- #endif
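/* Illustrative sketch (editorial addition): the saturating absolute-value
 * intrinsics above clamp at the type maximum instead of wrapping, so the
 * most negative input does not stay negative. */
#include <arm_neon.h>
#include <stdint.h>

static inline void qabs_demo(void) {
  int8_t a = vqabsb_s8(INT8_MIN);                    /* 127, not -128 */
  int64x2_t v = vqabsq_s64(vdupq_n_s64(INT64_MIN));  /* both lanes become INT64_MAX */
  (void)a; (void)v;
}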
- #ifdef __LITTLE_ENDIAN__
- __ai uint8_t vqaddb_u8(uint8_t __p0, uint8_t __p1) {
- uint8_t __ret;
- __ret = (uint8_t) __builtin_neon_vqaddb_u8(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint8_t vqaddb_u8(uint8_t __p0, uint8_t __p1) {
- uint8_t __ret;
- __ret = (uint8_t) __builtin_neon_vqaddb_u8(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32_t vqadds_u32(uint32_t __p0, uint32_t __p1) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vqadds_u32(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint32_t vqadds_u32(uint32_t __p0, uint32_t __p1) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vqadds_u32(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64_t vqaddd_u64(uint64_t __p0, uint64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vqaddd_u64(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint64_t vqaddd_u64(uint64_t __p0, uint64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vqaddd_u64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16_t vqaddh_u16(uint16_t __p0, uint16_t __p1) {
- uint16_t __ret;
- __ret = (uint16_t) __builtin_neon_vqaddh_u16(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint16_t vqaddh_u16(uint16_t __p0, uint16_t __p1) {
- uint16_t __ret;
- __ret = (uint16_t) __builtin_neon_vqaddh_u16(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8_t vqaddb_s8(int8_t __p0, int8_t __p1) {
- int8_t __ret;
- __ret = (int8_t) __builtin_neon_vqaddb_s8(__p0, __p1);
- return __ret;
- }
- #else
- __ai int8_t vqaddb_s8(int8_t __p0, int8_t __p1) {
- int8_t __ret;
- __ret = (int8_t) __builtin_neon_vqaddb_s8(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32_t vqadds_s32(int32_t __p0, int32_t __p1) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vqadds_s32(__p0, __p1);
- return __ret;
- }
- #else
- __ai int32_t vqadds_s32(int32_t __p0, int32_t __p1) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vqadds_s32(__p0, __p1);
- return __ret;
- }
- __ai int32_t __noswap_vqadds_s32(int32_t __p0, int32_t __p1) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vqadds_s32(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64_t vqaddd_s64(int64_t __p0, int64_t __p1) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vqaddd_s64(__p0, __p1);
- return __ret;
- }
- #else
- __ai int64_t vqaddd_s64(int64_t __p0, int64_t __p1) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vqaddd_s64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16_t vqaddh_s16(int16_t __p0, int16_t __p1) {
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vqaddh_s16(__p0, __p1);
- return __ret;
- }
- #else
- __ai int16_t vqaddh_s16(int16_t __p0, int16_t __p1) {
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vqaddh_s16(__p0, __p1);
- return __ret;
- }
- __ai int16_t __noswap_vqaddh_s16(int16_t __p0, int16_t __p1) {
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vqaddh_s16(__p0, __p1);
- return __ret;
- }
- #endif
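/* Illustrative sketch (editorial addition): the scalar saturating adds
 * defined above clamp at the bounds of the result type instead of wrapping. */
#include <arm_neon.h>
#include <stdint.h>

static inline void qadd_demo(void) {
  uint8_t u = vqaddb_u8(200, 100);        /* 255, not 44 */
  int16_t s = vqaddh_s16(INT16_MAX, 1);   /* 32767, unchanged */
  (void)u; (void)s;
}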
- #ifdef __LITTLE_ENDIAN__
- __ai int64_t vqdmlals_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vqdmlals_s32(__p0, __p1, __p2);
- return __ret;
- }
- #else
- __ai int64_t vqdmlals_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vqdmlals_s32(__p0, __p1, __p2);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32_t vqdmlalh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vqdmlalh_s16(__p0, __p1, __p2);
- return __ret;
- }
- #else
- __ai int32_t vqdmlalh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vqdmlalh_s16(__p0, __p1, __p2);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
- int64x2_t __ret;
- __ret = vqdmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
- return __ret;
- }
- #else
- __ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- int64x2_t __ret;
- __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
- int32x4_t __ret;
- __ret = vqdmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
- return __ret;
- }
- #else
- __ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
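/* Illustrative sketch (editorial addition): vqdmlal_high_s16 behaves like
 * vqdmlal_s16 applied to the upper halves of the 128-bit multiplicands,
 * i.e. acc + sat(2 * hi(a) * hi(b)) widened to 32 bits per lane. */
#include <arm_neon.h>

static inline int32x4_t qdmlal_high_demo(int32x4_t acc, int16x8_t a, int16x8_t b) {
  return vqdmlal_high_s16(acc, a, b);
}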
- #ifdef __LITTLE_ENDIAN__
- #define vqdmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x2_t __s2 = __p2; \
- int64x2_t __ret; \
- __ret = vqdmlal_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vqdmlal_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x2_t __s2 = __p2; \
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
- int64x2_t __ret; \
- __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x4_t __s2 = __p2; \
- int32x4_t __ret; \
- __ret = vqdmlal_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vqdmlal_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x4_t __s2 = __p2; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x4_t __s2 = __p2; \
- int64x2_t __ret; \
- __ret = vqdmlal_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vqdmlal_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x4_t __s2 = __p2; \
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- int64x2_t __ret; \
- __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x8_t __s2 = __p2; \
- int32x4_t __ret; \
- __ret = vqdmlal_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vqdmlal_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x8_t __s2 = __p2; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
- int64x2_t __ret;
- __ret = vqdmlal_n_s32(__p0, vget_high_s32(__p1), __p2);
- return __ret;
- }
- #else
- __ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int64x2_t __ret;
- __ret = __noswap_vqdmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
- int32x4_t __ret;
- __ret = vqdmlal_n_s16(__p0, vget_high_s16(__p1), __p2);
- return __ret;
- }
- #else
- __ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __noswap_vqdmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64_t __s0 = __p0; \
- int32_t __s1 = __p1; \
- int32x2_t __s2 = __p2; \
- int64_t __ret; \
- __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, (int8x8_t)__s2, __p3); \
- __ret; \
- })
- #else
- #define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64_t __s0 = __p0; \
- int32_t __s1 = __p1; \
- int32x2_t __s2 = __p2; \
- int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
- int64_t __ret; \
- __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, (int8x8_t)__rev2, __p3); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32_t __s0 = __p0; \
- int16_t __s1 = __p1; \
- int16x4_t __s2 = __p2; \
- int32_t __ret; \
- __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, (int8x8_t)__s2, __p3); \
- __ret; \
- })
- #else
- #define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32_t __s0 = __p0; \
- int16_t __s1 = __p1; \
- int16x4_t __s2 = __p2; \
- int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- int32_t __ret; \
- __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, (int8x8_t)__rev2, __p3); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64_t __s0 = __p0; \
- int32_t __s1 = __p1; \
- int32x4_t __s2 = __p2; \
- int64_t __ret; \
- __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, (int8x16_t)__s2, __p3); \
- __ret; \
- })
- #else
- #define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64_t __s0 = __p0; \
- int32_t __s1 = __p1; \
- int32x4_t __s2 = __p2; \
- int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- int64_t __ret; \
- __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, (int8x16_t)__rev2, __p3); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32_t __s0 = __p0; \
- int16_t __s1 = __p1; \
- int16x8_t __s2 = __p2; \
- int32_t __ret; \
- __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, (int8x16_t)__s2, __p3); \
- __ret; \
- })
- #else
- #define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32_t __s0 = __p0; \
- int16_t __s1 = __p1; \
- int16x8_t __s2 = __p2; \
- int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
- int32_t __ret; \
- __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, (int8x16_t)__rev2, __p3); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x4_t __s2 = __p2; \
- int64x2_t __ret; \
- __ret = vqdmlal_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vqdmlal_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x4_t __s2 = __p2; \
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- int64x2_t __ret; \
- __ret = __noswap_vqdmlal_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x8_t __s2 = __p2; \
- int32x4_t __ret; \
- __ret = vqdmlal_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vqdmlal_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x8_t __s2 = __p2; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = __noswap_vqdmlal_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
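/* Illustrative sketch (editorial addition): the scalar multiply-accumulate
 * form computes acc + sat(2 * a * b), saturating at every step. */
#include <arm_neon.h>

static inline void qdmlal_scalar_demo(void) {
  int32_t r = vqdmlalh_s16(10, 3, 4);   /* 10 + 2*3*4 = 34 */
  (void)r;
}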
- #ifdef __LITTLE_ENDIAN__
- __ai int64_t vqdmlsls_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vqdmlsls_s32(__p0, __p1, __p2);
- return __ret;
- }
- #else
- __ai int64_t vqdmlsls_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vqdmlsls_s32(__p0, __p1, __p2);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32_t vqdmlslh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vqdmlslh_s16(__p0, __p1, __p2);
- return __ret;
- }
- #else
- __ai int32_t vqdmlslh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vqdmlslh_s16(__p0, __p1, __p2);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
- int64x2_t __ret;
- __ret = vqdmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
- return __ret;
- }
- #else
- __ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- int64x2_t __ret;
- __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
- int32x4_t __ret;
- __ret = vqdmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
- return __ret;
- }
- #else
- __ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x2_t __s2 = __p2; \
- int64x2_t __ret; \
- __ret = vqdmlsl_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vqdmlsl_high_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x2_t __s2 = __p2; \
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
- int64x2_t __ret; \
- __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x4_t __s2 = __p2; \
- int32x4_t __ret; \
- __ret = vqdmlsl_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vqdmlsl_high_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x4_t __s2 = __p2; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x4_t __s2 = __p2; \
- int64x2_t __ret; \
- __ret = vqdmlsl_s32(__s0, vget_high_s32(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vqdmlsl_high_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x4_t __s2 = __p2; \
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- int64x2_t __ret; \
- __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x8_t __s2 = __p2; \
- int32x4_t __ret; \
- __ret = vqdmlsl_s16(__s0, vget_high_s16(__s1), __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vqdmlsl_high_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x8_t __s2 = __p2; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
- int64x2_t __ret;
- __ret = vqdmlsl_n_s32(__p0, vget_high_s32(__p1), __p2);
- return __ret;
- }
- #else
- __ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int64x2_t __ret;
- __ret = __noswap_vqdmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
- int32x4_t __ret;
- __ret = vqdmlsl_n_s16(__p0, vget_high_s16(__p1), __p2);
- return __ret;
- }
- #else
- __ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __noswap_vqdmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64_t __s0 = __p0; \
- int32_t __s1 = __p1; \
- int32x2_t __s2 = __p2; \
- int64_t __ret; \
- __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, (int8x8_t)__s2, __p3); \
- __ret; \
- })
- #else
- #define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64_t __s0 = __p0; \
- int32_t __s1 = __p1; \
- int32x2_t __s2 = __p2; \
- int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
- int64_t __ret; \
- __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, (int8x8_t)__rev2, __p3); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32_t __s0 = __p0; \
- int16_t __s1 = __p1; \
- int16x4_t __s2 = __p2; \
- int32_t __ret; \
- __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, (int8x8_t)__s2, __p3); \
- __ret; \
- })
- #else
- #define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32_t __s0 = __p0; \
- int16_t __s1 = __p1; \
- int16x4_t __s2 = __p2; \
- int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- int32_t __ret; \
- __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, (int8x8_t)__rev2, __p3); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64_t __s0 = __p0; \
- int32_t __s1 = __p1; \
- int32x4_t __s2 = __p2; \
- int64_t __ret; \
- __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, (int8x16_t)__s2, __p3); \
- __ret; \
- })
- #else
- #define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64_t __s0 = __p0; \
- int32_t __s1 = __p1; \
- int32x4_t __s2 = __p2; \
- int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- int64_t __ret; \
- __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, (int8x16_t)__rev2, __p3); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32_t __s0 = __p0; \
- int16_t __s1 = __p1; \
- int16x8_t __s2 = __p2; \
- int32_t __ret; \
- __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, (int8x16_t)__s2, __p3); \
- __ret; \
- })
- #else
- #define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32_t __s0 = __p0; \
- int16_t __s1 = __p1; \
- int16x8_t __s2 = __p2; \
- int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
- int32_t __ret; \
- __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, (int8x16_t)__rev2, __p3); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x4_t __s2 = __p2; \
- int64x2_t __ret; \
- __ret = vqdmlsl_s32(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vqdmlsl_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x4_t __s2 = __p2; \
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- int64x2_t __ret; \
- __ret = __noswap_vqdmlsl_s32(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x8_t __s2 = __p2; \
- int32x4_t __ret; \
- __ret = vqdmlsl_s16(__s0, __s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vqdmlsl_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x8_t __s2 = __p2; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = __noswap_vqdmlsl_s16(__rev0, __rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32_t vqdmulhs_s32(int32_t __p0, int32_t __p1) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vqdmulhs_s32(__p0, __p1);
- return __ret;
- }
- #else
- __ai int32_t vqdmulhs_s32(int32_t __p0, int32_t __p1) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vqdmulhs_s32(__p0, __p1);
- return __ret;
- }
- __ai int32_t __noswap_vqdmulhs_s32(int32_t __p0, int32_t __p1) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vqdmulhs_s32(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16_t vqdmulhh_s16(int16_t __p0, int16_t __p1) {
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vqdmulhh_s16(__p0, __p1);
- return __ret;
- }
- #else
- __ai int16_t vqdmulhh_s16(int16_t __p0, int16_t __p1) {
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vqdmulhh_s16(__p0, __p1);
- return __ret;
- }
- __ai int16_t __noswap_vqdmulhh_s16(int16_t __p0, int16_t __p1) {
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vqdmulhh_s16(__p0, __p1);
- return __ret;
- }
- #endif
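/* Illustrative sketch (editorial addition): vqdmulhh_s16 returns the high
 * half of the saturated doubling product, (2*a*b) >> 16, which is the usual
 * Q15 fixed-point multiply. */
#include <arm_neon.h>

static inline void qdmulh_demo(void) {
  int16_t half = 0x4000;                  /* 0.5 in Q15 */
  int16_t r = vqdmulhh_s16(half, half);   /* 0x2000 == 0.25 in Q15 */
  (void)r;
}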
- #ifdef __LITTLE_ENDIAN__
- #define vqdmulhs_lane_s32(__p0_158, __p1_158, __p2_158) __extension__ ({ \
- int32_t __s0_158 = __p0_158; \
- int32x2_t __s1_158 = __p1_158; \
- int32_t __ret_158; \
- __ret_158 = vqdmulhs_s32(__s0_158, vget_lane_s32(__s1_158, __p2_158)); \
- __ret_158; \
- })
- #else
- #define vqdmulhs_lane_s32(__p0_159, __p1_159, __p2_159) __extension__ ({ \
- int32_t __s0_159 = __p0_159; \
- int32x2_t __s1_159 = __p1_159; \
- int32x2_t __rev1_159; __rev1_159 = __builtin_shufflevector(__s1_159, __s1_159, 1, 0); \
- int32_t __ret_159; \
- __ret_159 = __noswap_vqdmulhs_s32(__s0_159, __noswap_vget_lane_s32(__rev1_159, __p2_159)); \
- __ret_159; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmulhh_lane_s16(__p0_160, __p1_160, __p2_160) __extension__ ({ \
- int16_t __s0_160 = __p0_160; \
- int16x4_t __s1_160 = __p1_160; \
- int16_t __ret_160; \
- __ret_160 = vqdmulhh_s16(__s0_160, vget_lane_s16(__s1_160, __p2_160)); \
- __ret_160; \
- })
- #else
- #define vqdmulhh_lane_s16(__p0_161, __p1_161, __p2_161) __extension__ ({ \
- int16_t __s0_161 = __p0_161; \
- int16x4_t __s1_161 = __p1_161; \
- int16x4_t __rev1_161; __rev1_161 = __builtin_shufflevector(__s1_161, __s1_161, 3, 2, 1, 0); \
- int16_t __ret_161; \
- __ret_161 = __noswap_vqdmulhh_s16(__s0_161, __noswap_vget_lane_s16(__rev1_161, __p2_161)); \
- __ret_161; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmulhs_laneq_s32(__p0_162, __p1_162, __p2_162) __extension__ ({ \
- int32_t __s0_162 = __p0_162; \
- int32x4_t __s1_162 = __p1_162; \
- int32_t __ret_162; \
- __ret_162 = vqdmulhs_s32(__s0_162, vgetq_lane_s32(__s1_162, __p2_162)); \
- __ret_162; \
- })
- #else
- #define vqdmulhs_laneq_s32(__p0_163, __p1_163, __p2_163) __extension__ ({ \
- int32_t __s0_163 = __p0_163; \
- int32x4_t __s1_163 = __p1_163; \
- int32x4_t __rev1_163; __rev1_163 = __builtin_shufflevector(__s1_163, __s1_163, 3, 2, 1, 0); \
- int32_t __ret_163; \
- __ret_163 = __noswap_vqdmulhs_s32(__s0_163, __noswap_vgetq_lane_s32(__rev1_163, __p2_163)); \
- __ret_163; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmulhh_laneq_s16(__p0_164, __p1_164, __p2_164) __extension__ ({ \
- int16_t __s0_164 = __p0_164; \
- int16x8_t __s1_164 = __p1_164; \
- int16_t __ret_164; \
- __ret_164 = vqdmulhh_s16(__s0_164, vgetq_lane_s16(__s1_164, __p2_164)); \
- __ret_164; \
- })
- #else
- #define vqdmulhh_laneq_s16(__p0_165, __p1_165, __p2_165) __extension__ ({ \
- int16_t __s0_165 = __p0_165; \
- int16x8_t __s1_165 = __p1_165; \
- int16x8_t __rev1_165; __rev1_165 = __builtin_shufflevector(__s1_165, __s1_165, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16_t __ret_165; \
- __ret_165 = __noswap_vqdmulhh_s16(__s0_165, __noswap_vgetq_lane_s16(__rev1_165, __p2_165)); \
- __ret_165; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x4_t __ret; \
- __ret = vqdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = __noswap_vqdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x8_t __ret; \
- __ret = vqdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __ret; \
- __ret = __noswap_vqdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x2_t __ret; \
- __ret = vqdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int32x2_t __ret; \
- __ret = __noswap_vqdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x4_t __ret; \
- __ret = vqdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x4_t __ret; \
- __ret = __noswap_vqdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64_t vqdmulls_s32(int32_t __p0, int32_t __p1) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vqdmulls_s32(__p0, __p1);
- return __ret;
- }
- #else
- __ai int64_t vqdmulls_s32(int32_t __p0, int32_t __p1) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vqdmulls_s32(__p0, __p1);
- return __ret;
- }
- __ai int64_t __noswap_vqdmulls_s32(int32_t __p0, int32_t __p1) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vqdmulls_s32(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32_t vqdmullh_s16(int16_t __p0, int16_t __p1) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vqdmullh_s16(__p0, __p1);
- return __ret;
- }
- #else
- __ai int32_t vqdmullh_s16(int16_t __p0, int16_t __p1) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vqdmullh_s16(__p0, __p1);
- return __ret;
- }
- __ai int32_t __noswap_vqdmullh_s16(int16_t __p0, int16_t __p1) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vqdmullh_s16(__p0, __p1);
- return __ret;
- }
- #endif
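/* Illustrative sketch (editorial addition): vqdmulls_s32 returns sat(2*a*b)
 * widened to 64 bits; the only input pair that actually saturates is
 * INT32_MIN * INT32_MIN. */
#include <arm_neon.h>
#include <stdint.h>

static inline void qdmull_demo(void) {
  int64_t r1 = vqdmulls_s32(3, 4);                  /* 24 */
  int64_t r2 = vqdmulls_s32(INT32_MIN, INT32_MIN);  /* INT64_MAX (saturated) */
  (void)r1; (void)r2;
}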
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
- int64x2_t __ret;
- __ret = vqdmull_s32(vget_high_s32(__p0), vget_high_s32(__p1));
- return __ret;
- }
- #else
- __ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int64x2_t __ret;
- __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
- int32x4_t __ret;
- __ret = vqdmull_s16(vget_high_s16(__p0), vget_high_s16(__p1));
- return __ret;
- }
- #else
- __ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int64x2_t __ret; \
- __ret = vqdmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vqdmull_high_lane_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int64x2_t __ret; \
- __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int32x4_t __ret; \
- __ret = vqdmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vqdmull_high_lane_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int64x2_t __ret; \
- __ret = vqdmull_s32(vget_high_s32(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vqdmull_high_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int64x2_t __ret; \
- __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int32x4_t __ret; \
- __ret = vqdmull_s16(vget_high_s16(__s0), __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vqdmull_high_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
- int64x2_t __ret;
- __ret = vqdmull_n_s32(vget_high_s32(__p0), __p1);
- return __ret;
- }
- #else
- __ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int64x2_t __ret;
- __ret = __noswap_vqdmull_n_s32(__noswap_vget_high_s32(__rev0), __p1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
- int32x4_t __ret;
- __ret = vqdmull_n_s16(vget_high_s16(__p0), __p1);
- return __ret;
- }
- #else
- __ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __noswap_vqdmull_n_s16(__noswap_vget_high_s16(__rev0), __p1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmulls_lane_s32(__p0_166, __p1_166, __p2_166) __extension__ ({ \
- int32_t __s0_166 = __p0_166; \
- int32x2_t __s1_166 = __p1_166; \
- int64_t __ret_166; \
- __ret_166 = vqdmulls_s32(__s0_166, vget_lane_s32(__s1_166, __p2_166)); \
- __ret_166; \
- })
- #else
- #define vqdmulls_lane_s32(__p0_167, __p1_167, __p2_167) __extension__ ({ \
- int32_t __s0_167 = __p0_167; \
- int32x2_t __s1_167 = __p1_167; \
- int32x2_t __rev1_167; __rev1_167 = __builtin_shufflevector(__s1_167, __s1_167, 1, 0); \
- int64_t __ret_167; \
- __ret_167 = __noswap_vqdmulls_s32(__s0_167, __noswap_vget_lane_s32(__rev1_167, __p2_167)); \
- __ret_167; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmullh_lane_s16(__p0_168, __p1_168, __p2_168) __extension__ ({ \
- int16_t __s0_168 = __p0_168; \
- int16x4_t __s1_168 = __p1_168; \
- int32_t __ret_168; \
- __ret_168 = vqdmullh_s16(__s0_168, vget_lane_s16(__s1_168, __p2_168)); \
- __ret_168; \
- })
- #else
- #define vqdmullh_lane_s16(__p0_169, __p1_169, __p2_169) __extension__ ({ \
- int16_t __s0_169 = __p0_169; \
- int16x4_t __s1_169 = __p1_169; \
- int16x4_t __rev1_169; __rev1_169 = __builtin_shufflevector(__s1_169, __s1_169, 3, 2, 1, 0); \
- int32_t __ret_169; \
- __ret_169 = __noswap_vqdmullh_s16(__s0_169, __noswap_vget_lane_s16(__rev1_169, __p2_169)); \
- __ret_169; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmulls_laneq_s32(__p0_170, __p1_170, __p2_170) __extension__ ({ \
- int32_t __s0_170 = __p0_170; \
- int32x4_t __s1_170 = __p1_170; \
- int64_t __ret_170; \
- __ret_170 = vqdmulls_s32(__s0_170, vgetq_lane_s32(__s1_170, __p2_170)); \
- __ret_170; \
- })
- #else
- #define vqdmulls_laneq_s32(__p0_171, __p1_171, __p2_171) __extension__ ({ \
- int32_t __s0_171 = __p0_171; \
- int32x4_t __s1_171 = __p1_171; \
- int32x4_t __rev1_171; __rev1_171 = __builtin_shufflevector(__s1_171, __s1_171, 3, 2, 1, 0); \
- int64_t __ret_171; \
- __ret_171 = __noswap_vqdmulls_s32(__s0_171, __noswap_vgetq_lane_s32(__rev1_171, __p2_171)); \
- __ret_171; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmullh_laneq_s16(__p0_172, __p1_172, __p2_172) __extension__ ({ \
- int16_t __s0_172 = __p0_172; \
- int16x8_t __s1_172 = __p1_172; \
- int32_t __ret_172; \
- __ret_172 = vqdmullh_s16(__s0_172, vgetq_lane_s16(__s1_172, __p2_172)); \
- __ret_172; \
- })
- #else
- #define vqdmullh_laneq_s16(__p0_173, __p1_173, __p2_173) __extension__ ({ \
- int16_t __s0_173 = __p0_173; \
- int16x8_t __s1_173 = __p1_173; \
- int16x8_t __rev1_173; __rev1_173 = __builtin_shufflevector(__s1_173, __s1_173, 7, 6, 5, 4, 3, 2, 1, 0); \
- int32_t __ret_173; \
- __ret_173 = __noswap_vqdmullh_s16(__s0_173, __noswap_vgetq_lane_s16(__rev1_173, __p2_173)); \
- __ret_173; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int64x2_t __ret; \
- __ret = vqdmull_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vqdmull_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int64x2_t __ret; \
- __ret = __noswap_vqdmull_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqdmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int32x4_t __ret; \
- __ret = vqdmull_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vqdmull_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = __noswap_vqdmull_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16_t vqmovns_s32(int32_t __p0) {
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vqmovns_s32(__p0);
- return __ret;
- }
- #else
- __ai int16_t vqmovns_s32(int32_t __p0) {
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vqmovns_s32(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32_t vqmovnd_s64(int64_t __p0) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vqmovnd_s64(__p0);
- return __ret;
- }
- #else
- __ai int32_t vqmovnd_s64(int64_t __p0) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vqmovnd_s64(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8_t vqmovnh_s16(int16_t __p0) {
- int8_t __ret;
- __ret = (int8_t) __builtin_neon_vqmovnh_s16(__p0);
- return __ret;
- }
- #else
- __ai int8_t vqmovnh_s16(int16_t __p0) {
- int8_t __ret;
- __ret = (int8_t) __builtin_neon_vqmovnh_s16(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16_t vqmovns_u32(uint32_t __p0) {
- uint16_t __ret;
- __ret = (uint16_t) __builtin_neon_vqmovns_u32(__p0);
- return __ret;
- }
- #else
- __ai uint16_t vqmovns_u32(uint32_t __p0) {
- uint16_t __ret;
- __ret = (uint16_t) __builtin_neon_vqmovns_u32(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32_t vqmovnd_u64(uint64_t __p0) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vqmovnd_u64(__p0);
- return __ret;
- }
- #else
- __ai uint32_t vqmovnd_u64(uint64_t __p0) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vqmovnd_u64(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8_t vqmovnh_u16(uint16_t __p0) {
- uint8_t __ret;
- __ret = (uint8_t) __builtin_neon_vqmovnh_u16(__p0);
- return __ret;
- }
- #else
- __ai uint8_t vqmovnh_u16(uint16_t __p0) {
- uint8_t __ret;
- __ret = (uint8_t) __builtin_neon_vqmovnh_u16(__p0);
- return __ret;
- }
- #endif
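- /* vqmovn_high_*: saturating narrow of the 128-bit source __p1, placed in the
-    high half of the result with __p0 kept as the low half.
-    Illustrative use: uint16x8_t r = vqmovn_high_u32(vqmovn_u32(lo), hi); */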
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
- uint16x8_t __ret;
- __ret = vcombine_u16(__p0, vqmovn_u32(__p1));
- return __ret;
- }
- #else
- __ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __noswap_vcombine_u16(__rev0, __noswap_vqmovn_u32(__rev1));
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
- uint32x4_t __ret;
- __ret = vcombine_u32(__p0, vqmovn_u64(__p1));
- return __ret;
- }
- #else
- __ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x4_t __ret;
- __ret = __noswap_vcombine_u32(__rev0, __noswap_vqmovn_u64(__rev1));
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
- uint8x16_t __ret;
- __ret = vcombine_u8(__p0, vqmovn_u16(__p1));
- return __ret;
- }
- #else
- __ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = __noswap_vcombine_u8(__rev0, __noswap_vqmovn_u16(__rev1));
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
- int16x8_t __ret;
- __ret = vcombine_s16(__p0, vqmovn_s32(__p1));
- return __ret;
- }
- #else
- __ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __noswap_vcombine_s16(__rev0, __noswap_vqmovn_s32(__rev1));
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
- int32x4_t __ret;
- __ret = vcombine_s32(__p0, vqmovn_s64(__p1));
- return __ret;
- }
- #else
- __ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x4_t __ret;
- __ret = __noswap_vcombine_s32(__rev0, __noswap_vqmovn_s64(__rev1));
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
- int8x16_t __ret;
- __ret = vcombine_s8(__p0, vqmovn_s16(__p1));
- return __ret;
- }
- #else
- __ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = __noswap_vcombine_s8(__rev0, __noswap_vqmovn_s16(__rev1));
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
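- /* vqmovun*_s*: scalar saturating narrow of a signed value into the unsigned
-    range of the next narrower width; negative inputs clamp to 0. */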
- #ifdef __LITTLE_ENDIAN__
- __ai int16_t vqmovuns_s32(int32_t __p0) {
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vqmovuns_s32(__p0);
- return __ret;
- }
- #else
- __ai int16_t vqmovuns_s32(int32_t __p0) {
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vqmovuns_s32(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32_t vqmovund_s64(int64_t __p0) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vqmovund_s64(__p0);
- return __ret;
- }
- #else
- __ai int32_t vqmovund_s64(int64_t __p0) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vqmovund_s64(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8_t vqmovunh_s16(int16_t __p0) {
- int8_t __ret;
- __ret = (int8_t) __builtin_neon_vqmovunh_s16(__p0);
- return __ret;
- }
- #else
- __ai int8_t vqmovunh_s16(int16_t __p0) {
- int8_t __ret;
- __ret = (int8_t) __builtin_neon_vqmovunh_s16(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vqmovun_high_s32(int16x4_t __p0, int32x4_t __p1) {
- uint16x8_t __ret;
- __ret = vcombine_u16((uint16x4_t)(__p0), vqmovun_s32(__p1));
- return __ret;
- }
- #else
- __ai uint16x8_t vqmovun_high_s32(int16x4_t __p0, int32x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __noswap_vcombine_u16((uint16x4_t)(__rev0), __noswap_vqmovun_s32(__rev1));
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vqmovun_high_s64(int32x2_t __p0, int64x2_t __p1) {
- uint32x4_t __ret;
- __ret = vcombine_u32((uint32x2_t)(__p0), vqmovun_s64(__p1));
- return __ret;
- }
- #else
- __ai uint32x4_t vqmovun_high_s64(int32x2_t __p0, int64x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x4_t __ret;
- __ret = __noswap_vcombine_u32((uint32x2_t)(__rev0), __noswap_vqmovun_s64(__rev1));
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vqmovun_high_s16(int8x8_t __p0, int16x8_t __p1) {
- uint8x16_t __ret;
- __ret = vcombine_u8((uint8x8_t)(__p0), vqmovun_s16(__p1));
- return __ret;
- }
- #else
- __ai uint8x16_t vqmovun_high_s16(int8x8_t __p0, int16x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = __noswap_vcombine_u8((uint8x8_t)(__rev0), __noswap_vqmovun_s16(__rev1));
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
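- /* vqneg*: saturating negate; the only value that actually saturates is the most
-    negative one, e.g. vqnegd_s64(INT64_MIN) yields INT64_MAX. */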
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vqnegq_s64(int64x2_t __p0) {
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 35);
- return __ret;
- }
- #else
- __ai int64x2_t vqnegq_s64(int64x2_t __p0) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 35);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vqneg_s64(int64x1_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 3);
- return __ret;
- }
- #else
- __ai int64x1_t vqneg_s64(int64x1_t __p0) {
- int64x1_t __ret;
- __ret = (int64x1_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 3);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8_t vqnegb_s8(int8_t __p0) {
- int8_t __ret;
- __ret = (int8_t) __builtin_neon_vqnegb_s8(__p0);
- return __ret;
- }
- #else
- __ai int8_t vqnegb_s8(int8_t __p0) {
- int8_t __ret;
- __ret = (int8_t) __builtin_neon_vqnegb_s8(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32_t vqnegs_s32(int32_t __p0) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vqnegs_s32(__p0);
- return __ret;
- }
- #else
- __ai int32_t vqnegs_s32(int32_t __p0) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vqnegs_s32(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64_t vqnegd_s64(int64_t __p0) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vqnegd_s64(__p0);
- return __ret;
- }
- #else
- __ai int64_t vqnegd_s64(int64_t __p0) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vqnegd_s64(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16_t vqnegh_s16(int16_t __p0) {
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vqnegh_s16(__p0);
- return __ret;
- }
- #else
- __ai int16_t vqnegh_s16(int16_t __p0) {
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vqnegh_s16(__p0);
- return __ret;
- }
- #endif
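- /* vqrdmulh*: saturating rounding doubling multiply returning the high half,
-    roughly sat((2*a*b + (1 << (N-1))) >> N) for N-bit elements; the _lane/_laneq
-    forms below multiply by a single broadcast lane of the second operand. */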
- #ifdef __LITTLE_ENDIAN__
- __ai int32_t vqrdmulhs_s32(int32_t __p0, int32_t __p1) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vqrdmulhs_s32(__p0, __p1);
- return __ret;
- }
- #else
- __ai int32_t vqrdmulhs_s32(int32_t __p0, int32_t __p1) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vqrdmulhs_s32(__p0, __p1);
- return __ret;
- }
- __ai int32_t __noswap_vqrdmulhs_s32(int32_t __p0, int32_t __p1) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vqrdmulhs_s32(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16_t vqrdmulhh_s16(int16_t __p0, int16_t __p1) {
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vqrdmulhh_s16(__p0, __p1);
- return __ret;
- }
- #else
- __ai int16_t vqrdmulhh_s16(int16_t __p0, int16_t __p1) {
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vqrdmulhh_s16(__p0, __p1);
- return __ret;
- }
- __ai int16_t __noswap_vqrdmulhh_s16(int16_t __p0, int16_t __p1) {
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vqrdmulhh_s16(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrdmulhs_lane_s32(__p0_174, __p1_174, __p2_174) __extension__ ({ \
- int32_t __s0_174 = __p0_174; \
- int32x2_t __s1_174 = __p1_174; \
- int32_t __ret_174; \
- __ret_174 = vqrdmulhs_s32(__s0_174, vget_lane_s32(__s1_174, __p2_174)); \
- __ret_174; \
- })
- #else
- #define vqrdmulhs_lane_s32(__p0_175, __p1_175, __p2_175) __extension__ ({ \
- int32_t __s0_175 = __p0_175; \
- int32x2_t __s1_175 = __p1_175; \
- int32x2_t __rev1_175; __rev1_175 = __builtin_shufflevector(__s1_175, __s1_175, 1, 0); \
- int32_t __ret_175; \
- __ret_175 = __noswap_vqrdmulhs_s32(__s0_175, __noswap_vget_lane_s32(__rev1_175, __p2_175)); \
- __ret_175; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrdmulhh_lane_s16(__p0_176, __p1_176, __p2_176) __extension__ ({ \
- int16_t __s0_176 = __p0_176; \
- int16x4_t __s1_176 = __p1_176; \
- int16_t __ret_176; \
- __ret_176 = vqrdmulhh_s16(__s0_176, vget_lane_s16(__s1_176, __p2_176)); \
- __ret_176; \
- })
- #else
- #define vqrdmulhh_lane_s16(__p0_177, __p1_177, __p2_177) __extension__ ({ \
- int16_t __s0_177 = __p0_177; \
- int16x4_t __s1_177 = __p1_177; \
- int16x4_t __rev1_177; __rev1_177 = __builtin_shufflevector(__s1_177, __s1_177, 3, 2, 1, 0); \
- int16_t __ret_177; \
- __ret_177 = __noswap_vqrdmulhh_s16(__s0_177, __noswap_vget_lane_s16(__rev1_177, __p2_177)); \
- __ret_177; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrdmulhs_laneq_s32(__p0_178, __p1_178, __p2_178) __extension__ ({ \
- int32_t __s0_178 = __p0_178; \
- int32x4_t __s1_178 = __p1_178; \
- int32_t __ret_178; \
- __ret_178 = vqrdmulhs_s32(__s0_178, vgetq_lane_s32(__s1_178, __p2_178)); \
- __ret_178; \
- })
- #else
- #define vqrdmulhs_laneq_s32(__p0_179, __p1_179, __p2_179) __extension__ ({ \
- int32_t __s0_179 = __p0_179; \
- int32x4_t __s1_179 = __p1_179; \
- int32x4_t __rev1_179; __rev1_179 = __builtin_shufflevector(__s1_179, __s1_179, 3, 2, 1, 0); \
- int32_t __ret_179; \
- __ret_179 = __noswap_vqrdmulhs_s32(__s0_179, __noswap_vgetq_lane_s32(__rev1_179, __p2_179)); \
- __ret_179; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrdmulhh_laneq_s16(__p0_180, __p1_180, __p2_180) __extension__ ({ \
- int16_t __s0_180 = __p0_180; \
- int16x8_t __s1_180 = __p1_180; \
- int16_t __ret_180; \
- __ret_180 = vqrdmulhh_s16(__s0_180, vgetq_lane_s16(__s1_180, __p2_180)); \
- __ret_180; \
- })
- #else
- #define vqrdmulhh_laneq_s16(__p0_181, __p1_181, __p2_181) __extension__ ({ \
- int16_t __s0_181 = __p0_181; \
- int16x8_t __s1_181 = __p1_181; \
- int16x8_t __rev1_181; __rev1_181 = __builtin_shufflevector(__s1_181, __s1_181, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16_t __ret_181; \
- __ret_181 = __noswap_vqrdmulhh_s16(__s0_181, __noswap_vgetq_lane_s16(__rev1_181, __p2_181)); \
- __ret_181; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x4_t __ret; \
- __ret = vqrdmulhq_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = __noswap_vqrdmulhq_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x8_t __ret; \
- __ret = vqrdmulhq_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x8_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __ret; \
- __ret = __noswap_vqrdmulhq_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x2_t __ret; \
- __ret = vqrdmulh_s32(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
- int32x2_t __s0 = __p0; \
- int32x4_t __s1 = __p1; \
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int32x2_t __ret; \
- __ret = __noswap_vqrdmulh_s32(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x4_t __ret; \
- __ret = vqrdmulh_s16(__s0, __builtin_shufflevector(__s1, __s1, __p2, __p2, __p2, __p2)); \
- __ret; \
- })
- #else
- #define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
- int16x4_t __s0 = __p0; \
- int16x8_t __s1 = __p1; \
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x4_t __ret; \
- __ret = __noswap_vqrdmulh_s16(__rev0, __builtin_shufflevector(__rev1, __rev1, __p2, __p2, __p2, __p2)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
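- /* vqrshl*: saturating rounding shift left by a signed shift count held in the
-    second operand; a negative count gives a rounding shift right. */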
- #ifdef __LITTLE_ENDIAN__
- __ai uint8_t vqrshlb_u8(uint8_t __p0, uint8_t __p1) {
- uint8_t __ret;
- __ret = (uint8_t) __builtin_neon_vqrshlb_u8(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint8_t vqrshlb_u8(uint8_t __p0, uint8_t __p1) {
- uint8_t __ret;
- __ret = (uint8_t) __builtin_neon_vqrshlb_u8(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32_t vqrshls_u32(uint32_t __p0, uint32_t __p1) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vqrshls_u32(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint32_t vqrshls_u32(uint32_t __p0, uint32_t __p1) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vqrshls_u32(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64_t vqrshld_u64(uint64_t __p0, uint64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vqrshld_u64(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint64_t vqrshld_u64(uint64_t __p0, uint64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vqrshld_u64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16_t vqrshlh_u16(uint16_t __p0, uint16_t __p1) {
- uint16_t __ret;
- __ret = (uint16_t) __builtin_neon_vqrshlh_u16(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint16_t vqrshlh_u16(uint16_t __p0, uint16_t __p1) {
- uint16_t __ret;
- __ret = (uint16_t) __builtin_neon_vqrshlh_u16(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8_t vqrshlb_s8(int8_t __p0, int8_t __p1) {
- int8_t __ret;
- __ret = (int8_t) __builtin_neon_vqrshlb_s8(__p0, __p1);
- return __ret;
- }
- #else
- __ai int8_t vqrshlb_s8(int8_t __p0, int8_t __p1) {
- int8_t __ret;
- __ret = (int8_t) __builtin_neon_vqrshlb_s8(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32_t vqrshls_s32(int32_t __p0, int32_t __p1) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vqrshls_s32(__p0, __p1);
- return __ret;
- }
- #else
- __ai int32_t vqrshls_s32(int32_t __p0, int32_t __p1) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vqrshls_s32(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64_t vqrshld_s64(int64_t __p0, int64_t __p1) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vqrshld_s64(__p0, __p1);
- return __ret;
- }
- #else
- __ai int64_t vqrshld_s64(int64_t __p0, int64_t __p1) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vqrshld_s64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16_t vqrshlh_s16(int16_t __p0, int16_t __p1) {
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vqrshlh_s16(__p0, __p1);
- return __ret;
- }
- #else
- __ai int16_t vqrshlh_s16(int16_t __p0, int16_t __p1) {
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vqrshlh_s16(__p0, __p1);
- return __ret;
- }
- #endif
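- /* vqrshrn_high_n_*: saturating rounding shift right narrow by an immediate,
-    with the narrowed result written above the existing low half __p0.
-    Illustrative use: uint16x8_t r = vqrshrn_high_n_u32(vqrshrn_n_u32(lo, 4), hi, 4); */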
- #ifdef __LITTLE_ENDIAN__
- #define vqrshrn_high_n_u32(__p0_182, __p1_182, __p2_182) __extension__ ({ \
- uint16x4_t __s0_182 = __p0_182; \
- uint32x4_t __s1_182 = __p1_182; \
- uint16x8_t __ret_182; \
- __ret_182 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_182), (uint16x4_t)(vqrshrn_n_u32(__s1_182, __p2_182)))); \
- __ret_182; \
- })
- #else
- #define vqrshrn_high_n_u32(__p0_183, __p1_183, __p2_183) __extension__ ({ \
- uint16x4_t __s0_183 = __p0_183; \
- uint32x4_t __s1_183 = __p1_183; \
- uint16x4_t __rev0_183; __rev0_183 = __builtin_shufflevector(__s0_183, __s0_183, 3, 2, 1, 0); \
- uint32x4_t __rev1_183; __rev1_183 = __builtin_shufflevector(__s1_183, __s1_183, 3, 2, 1, 0); \
- uint16x8_t __ret_183; \
- __ret_183 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_183), (uint16x4_t)(__noswap_vqrshrn_n_u32(__rev1_183, __p2_183)))); \
- __ret_183 = __builtin_shufflevector(__ret_183, __ret_183, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_183; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrshrn_high_n_u64(__p0_184, __p1_184, __p2_184) __extension__ ({ \
- uint32x2_t __s0_184 = __p0_184; \
- uint64x2_t __s1_184 = __p1_184; \
- uint32x4_t __ret_184; \
- __ret_184 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_184), (uint32x2_t)(vqrshrn_n_u64(__s1_184, __p2_184)))); \
- __ret_184; \
- })
- #else
- #define vqrshrn_high_n_u64(__p0_185, __p1_185, __p2_185) __extension__ ({ \
- uint32x2_t __s0_185 = __p0_185; \
- uint64x2_t __s1_185 = __p1_185; \
- uint32x2_t __rev0_185; __rev0_185 = __builtin_shufflevector(__s0_185, __s0_185, 1, 0); \
- uint64x2_t __rev1_185; __rev1_185 = __builtin_shufflevector(__s1_185, __s1_185, 1, 0); \
- uint32x4_t __ret_185; \
- __ret_185 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_185), (uint32x2_t)(__noswap_vqrshrn_n_u64(__rev1_185, __p2_185)))); \
- __ret_185 = __builtin_shufflevector(__ret_185, __ret_185, 3, 2, 1, 0); \
- __ret_185; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrshrn_high_n_u16(__p0_186, __p1_186, __p2_186) __extension__ ({ \
- uint8x8_t __s0_186 = __p0_186; \
- uint16x8_t __s1_186 = __p1_186; \
- uint8x16_t __ret_186; \
- __ret_186 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_186), (uint8x8_t)(vqrshrn_n_u16(__s1_186, __p2_186)))); \
- __ret_186; \
- })
- #else
- #define vqrshrn_high_n_u16(__p0_187, __p1_187, __p2_187) __extension__ ({ \
- uint8x8_t __s0_187 = __p0_187; \
- uint16x8_t __s1_187 = __p1_187; \
- uint8x8_t __rev0_187; __rev0_187 = __builtin_shufflevector(__s0_187, __s0_187, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __rev1_187; __rev1_187 = __builtin_shufflevector(__s1_187, __s1_187, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x16_t __ret_187; \
- __ret_187 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_187), (uint8x8_t)(__noswap_vqrshrn_n_u16(__rev1_187, __p2_187)))); \
- __ret_187 = __builtin_shufflevector(__ret_187, __ret_187, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_187; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrshrn_high_n_s32(__p0_188, __p1_188, __p2_188) __extension__ ({ \
- int16x4_t __s0_188 = __p0_188; \
- int32x4_t __s1_188 = __p1_188; \
- int16x8_t __ret_188; \
- __ret_188 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_188), (int16x4_t)(vqrshrn_n_s32(__s1_188, __p2_188)))); \
- __ret_188; \
- })
- #else
- #define vqrshrn_high_n_s32(__p0_189, __p1_189, __p2_189) __extension__ ({ \
- int16x4_t __s0_189 = __p0_189; \
- int32x4_t __s1_189 = __p1_189; \
- int16x4_t __rev0_189; __rev0_189 = __builtin_shufflevector(__s0_189, __s0_189, 3, 2, 1, 0); \
- int32x4_t __rev1_189; __rev1_189 = __builtin_shufflevector(__s1_189, __s1_189, 3, 2, 1, 0); \
- int16x8_t __ret_189; \
- __ret_189 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_189), (int16x4_t)(__noswap_vqrshrn_n_s32(__rev1_189, __p2_189)))); \
- __ret_189 = __builtin_shufflevector(__ret_189, __ret_189, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_189; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrshrn_high_n_s64(__p0_190, __p1_190, __p2_190) __extension__ ({ \
- int32x2_t __s0_190 = __p0_190; \
- int64x2_t __s1_190 = __p1_190; \
- int32x4_t __ret_190; \
- __ret_190 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_190), (int32x2_t)(vqrshrn_n_s64(__s1_190, __p2_190)))); \
- __ret_190; \
- })
- #else
- #define vqrshrn_high_n_s64(__p0_191, __p1_191, __p2_191) __extension__ ({ \
- int32x2_t __s0_191 = __p0_191; \
- int64x2_t __s1_191 = __p1_191; \
- int32x2_t __rev0_191; __rev0_191 = __builtin_shufflevector(__s0_191, __s0_191, 1, 0); \
- int64x2_t __rev1_191; __rev1_191 = __builtin_shufflevector(__s1_191, __s1_191, 1, 0); \
- int32x4_t __ret_191; \
- __ret_191 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_191), (int32x2_t)(__noswap_vqrshrn_n_s64(__rev1_191, __p2_191)))); \
- __ret_191 = __builtin_shufflevector(__ret_191, __ret_191, 3, 2, 1, 0); \
- __ret_191; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrshrn_high_n_s16(__p0_192, __p1_192, __p2_192) __extension__ ({ \
- int8x8_t __s0_192 = __p0_192; \
- int16x8_t __s1_192 = __p1_192; \
- int8x16_t __ret_192; \
- __ret_192 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_192), (int8x8_t)(vqrshrn_n_s16(__s1_192, __p2_192)))); \
- __ret_192; \
- })
- #else
- #define vqrshrn_high_n_s16(__p0_193, __p1_193, __p2_193) __extension__ ({ \
- int8x8_t __s0_193 = __p0_193; \
- int16x8_t __s1_193 = __p1_193; \
- int8x8_t __rev0_193; __rev0_193 = __builtin_shufflevector(__s0_193, __s0_193, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __rev1_193; __rev1_193 = __builtin_shufflevector(__s1_193, __s1_193, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x16_t __ret_193; \
- __ret_193 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_193), (int8x8_t)(__noswap_vqrshrn_n_s16(__rev1_193, __p2_193)))); \
- __ret_193 = __builtin_shufflevector(__ret_193, __ret_193, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_193; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrshrns_n_u32(__p0, __p1) __extension__ ({ \
- uint32_t __s0 = __p0; \
- uint16_t __ret; \
- __ret = (uint16_t) __builtin_neon_vqrshrns_n_u32(__s0, __p1); \
- __ret; \
- })
- #else
- #define vqrshrns_n_u32(__p0, __p1) __extension__ ({ \
- uint32_t __s0 = __p0; \
- uint16_t __ret; \
- __ret = (uint16_t) __builtin_neon_vqrshrns_n_u32(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrshrnd_n_u64(__p0, __p1) __extension__ ({ \
- uint64_t __s0 = __p0; \
- uint32_t __ret; \
- __ret = (uint32_t) __builtin_neon_vqrshrnd_n_u64(__s0, __p1); \
- __ret; \
- })
- #else
- #define vqrshrnd_n_u64(__p0, __p1) __extension__ ({ \
- uint64_t __s0 = __p0; \
- uint32_t __ret; \
- __ret = (uint32_t) __builtin_neon_vqrshrnd_n_u64(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrshrnh_n_u16(__p0, __p1) __extension__ ({ \
- uint16_t __s0 = __p0; \
- uint8_t __ret; \
- __ret = (uint8_t) __builtin_neon_vqrshrnh_n_u16(__s0, __p1); \
- __ret; \
- })
- #else
- #define vqrshrnh_n_u16(__p0, __p1) __extension__ ({ \
- uint16_t __s0 = __p0; \
- uint8_t __ret; \
- __ret = (uint8_t) __builtin_neon_vqrshrnh_n_u16(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrshrns_n_s32(__p0, __p1) __extension__ ({ \
- int32_t __s0 = __p0; \
- int16_t __ret; \
- __ret = (int16_t) __builtin_neon_vqrshrns_n_s32(__s0, __p1); \
- __ret; \
- })
- #else
- #define vqrshrns_n_s32(__p0, __p1) __extension__ ({ \
- int32_t __s0 = __p0; \
- int16_t __ret; \
- __ret = (int16_t) __builtin_neon_vqrshrns_n_s32(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrshrnd_n_s64(__p0, __p1) __extension__ ({ \
- int64_t __s0 = __p0; \
- int32_t __ret; \
- __ret = (int32_t) __builtin_neon_vqrshrnd_n_s64(__s0, __p1); \
- __ret; \
- })
- #else
- #define vqrshrnd_n_s64(__p0, __p1) __extension__ ({ \
- int64_t __s0 = __p0; \
- int32_t __ret; \
- __ret = (int32_t) __builtin_neon_vqrshrnd_n_s64(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrshrnh_n_s16(__p0, __p1) __extension__ ({ \
- int16_t __s0 = __p0; \
- int8_t __ret; \
- __ret = (int8_t) __builtin_neon_vqrshrnh_n_s16(__s0, __p1); \
- __ret; \
- })
- #else
- #define vqrshrnh_n_s16(__p0, __p1) __extension__ ({ \
- int16_t __s0 = __p0; \
- int8_t __ret; \
- __ret = (int8_t) __builtin_neon_vqrshrnh_n_s16(__s0, __p1); \
- __ret; \
- })
- #endif
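- /* vqrshrun*: as vqrshrn but narrowing signed sources into the unsigned range
-    (the trailing "un" = unsigned saturation); results below 0 clamp to 0. */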
- #ifdef __LITTLE_ENDIAN__
- #define vqrshrun_high_n_s32(__p0_194, __p1_194, __p2_194) __extension__ ({ \
- int16x4_t __s0_194 = __p0_194; \
- int32x4_t __s1_194 = __p1_194; \
- int16x8_t __ret_194; \
- __ret_194 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_194), (int16x4_t)(vqrshrun_n_s32(__s1_194, __p2_194)))); \
- __ret_194; \
- })
- #else
- #define vqrshrun_high_n_s32(__p0_195, __p1_195, __p2_195) __extension__ ({ \
- int16x4_t __s0_195 = __p0_195; \
- int32x4_t __s1_195 = __p1_195; \
- int16x4_t __rev0_195; __rev0_195 = __builtin_shufflevector(__s0_195, __s0_195, 3, 2, 1, 0); \
- int32x4_t __rev1_195; __rev1_195 = __builtin_shufflevector(__s1_195, __s1_195, 3, 2, 1, 0); \
- int16x8_t __ret_195; \
- __ret_195 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_195), (int16x4_t)(__noswap_vqrshrun_n_s32(__rev1_195, __p2_195)))); \
- __ret_195 = __builtin_shufflevector(__ret_195, __ret_195, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_195; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrshrun_high_n_s64(__p0_196, __p1_196, __p2_196) __extension__ ({ \
- int32x2_t __s0_196 = __p0_196; \
- int64x2_t __s1_196 = __p1_196; \
- int32x4_t __ret_196; \
- __ret_196 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_196), (int32x2_t)(vqrshrun_n_s64(__s1_196, __p2_196)))); \
- __ret_196; \
- })
- #else
- #define vqrshrun_high_n_s64(__p0_197, __p1_197, __p2_197) __extension__ ({ \
- int32x2_t __s0_197 = __p0_197; \
- int64x2_t __s1_197 = __p1_197; \
- int32x2_t __rev0_197; __rev0_197 = __builtin_shufflevector(__s0_197, __s0_197, 1, 0); \
- int64x2_t __rev1_197; __rev1_197 = __builtin_shufflevector(__s1_197, __s1_197, 1, 0); \
- int32x4_t __ret_197; \
- __ret_197 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_197), (int32x2_t)(__noswap_vqrshrun_n_s64(__rev1_197, __p2_197)))); \
- __ret_197 = __builtin_shufflevector(__ret_197, __ret_197, 3, 2, 1, 0); \
- __ret_197; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrshrun_high_n_s16(__p0_198, __p1_198, __p2_198) __extension__ ({ \
- int8x8_t __s0_198 = __p0_198; \
- int16x8_t __s1_198 = __p1_198; \
- int8x16_t __ret_198; \
- __ret_198 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_198), (int8x8_t)(vqrshrun_n_s16(__s1_198, __p2_198)))); \
- __ret_198; \
- })
- #else
- #define vqrshrun_high_n_s16(__p0_199, __p1_199, __p2_199) __extension__ ({ \
- int8x8_t __s0_199 = __p0_199; \
- int16x8_t __s1_199 = __p1_199; \
- int8x8_t __rev0_199; __rev0_199 = __builtin_shufflevector(__s0_199, __s0_199, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __rev1_199; __rev1_199 = __builtin_shufflevector(__s1_199, __s1_199, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x16_t __ret_199; \
- __ret_199 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_199), (int8x8_t)(__noswap_vqrshrun_n_s16(__rev1_199, __p2_199)))); \
- __ret_199 = __builtin_shufflevector(__ret_199, __ret_199, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_199; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrshruns_n_s32(__p0, __p1) __extension__ ({ \
- int32_t __s0 = __p0; \
- int16_t __ret; \
- __ret = (int16_t) __builtin_neon_vqrshruns_n_s32(__s0, __p1); \
- __ret; \
- })
- #else
- #define vqrshruns_n_s32(__p0, __p1) __extension__ ({ \
- int32_t __s0 = __p0; \
- int16_t __ret; \
- __ret = (int16_t) __builtin_neon_vqrshruns_n_s32(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrshrund_n_s64(__p0, __p1) __extension__ ({ \
- int64_t __s0 = __p0; \
- int32_t __ret; \
- __ret = (int32_t) __builtin_neon_vqrshrund_n_s64(__s0, __p1); \
- __ret; \
- })
- #else
- #define vqrshrund_n_s64(__p0, __p1) __extension__ ({ \
- int64_t __s0 = __p0; \
- int32_t __ret; \
- __ret = (int32_t) __builtin_neon_vqrshrund_n_s64(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrshrunh_n_s16(__p0, __p1) __extension__ ({ \
- int16_t __s0 = __p0; \
- int8_t __ret; \
- __ret = (int8_t) __builtin_neon_vqrshrunh_n_s16(__s0, __p1); \
- __ret; \
- })
- #else
- #define vqrshrunh_n_s16(__p0, __p1) __extension__ ({ \
- int16_t __s0 = __p0; \
- int8_t __ret; \
- __ret = (int8_t) __builtin_neon_vqrshrunh_n_s16(__s0, __p1); \
- __ret; \
- })
- #endif
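- /* vqshl* (register form): saturating shift left by a signed count in the second
-    operand; unlike vqrshl, negative counts shift right with truncation. */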
- #ifdef __LITTLE_ENDIAN__
- __ai uint8_t vqshlb_u8(uint8_t __p0, uint8_t __p1) {
- uint8_t __ret;
- __ret = (uint8_t) __builtin_neon_vqshlb_u8(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint8_t vqshlb_u8(uint8_t __p0, uint8_t __p1) {
- uint8_t __ret;
- __ret = (uint8_t) __builtin_neon_vqshlb_u8(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32_t vqshls_u32(uint32_t __p0, uint32_t __p1) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vqshls_u32(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint32_t vqshls_u32(uint32_t __p0, uint32_t __p1) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vqshls_u32(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64_t vqshld_u64(uint64_t __p0, uint64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vqshld_u64(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint64_t vqshld_u64(uint64_t __p0, uint64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vqshld_u64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16_t vqshlh_u16(uint16_t __p0, uint16_t __p1) {
- uint16_t __ret;
- __ret = (uint16_t) __builtin_neon_vqshlh_u16(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint16_t vqshlh_u16(uint16_t __p0, uint16_t __p1) {
- uint16_t __ret;
- __ret = (uint16_t) __builtin_neon_vqshlh_u16(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8_t vqshlb_s8(int8_t __p0, int8_t __p1) {
- int8_t __ret;
- __ret = (int8_t) __builtin_neon_vqshlb_s8(__p0, __p1);
- return __ret;
- }
- #else
- __ai int8_t vqshlb_s8(int8_t __p0, int8_t __p1) {
- int8_t __ret;
- __ret = (int8_t) __builtin_neon_vqshlb_s8(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32_t vqshls_s32(int32_t __p0, int32_t __p1) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vqshls_s32(__p0, __p1);
- return __ret;
- }
- #else
- __ai int32_t vqshls_s32(int32_t __p0, int32_t __p1) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vqshls_s32(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64_t vqshld_s64(int64_t __p0, int64_t __p1) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vqshld_s64(__p0, __p1);
- return __ret;
- }
- #else
- __ai int64_t vqshld_s64(int64_t __p0, int64_t __p1) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vqshld_s64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16_t vqshlh_s16(int16_t __p0, int16_t __p1) {
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vqshlh_s16(__p0, __p1);
- return __ret;
- }
- #else
- __ai int16_t vqshlh_s16(int16_t __p0, int16_t __p1) {
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vqshlh_s16(__p0, __p1);
- return __ret;
- }
- #endif
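- /* vqshl*_n and vqshlu*_n: saturating shift left by an immediate; the "u" forms
-    take a signed input and saturate the result to the unsigned range. */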
- #ifdef __LITTLE_ENDIAN__
- #define vqshlb_n_u8(__p0, __p1) __extension__ ({ \
- uint8_t __s0 = __p0; \
- uint8_t __ret; \
- __ret = (uint8_t) __builtin_neon_vqshlb_n_u8(__s0, __p1); \
- __ret; \
- })
- #else
- #define vqshlb_n_u8(__p0, __p1) __extension__ ({ \
- uint8_t __s0 = __p0; \
- uint8_t __ret; \
- __ret = (uint8_t) __builtin_neon_vqshlb_n_u8(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshls_n_u32(__p0, __p1) __extension__ ({ \
- uint32_t __s0 = __p0; \
- uint32_t __ret; \
- __ret = (uint32_t) __builtin_neon_vqshls_n_u32(__s0, __p1); \
- __ret; \
- })
- #else
- #define vqshls_n_u32(__p0, __p1) __extension__ ({ \
- uint32_t __s0 = __p0; \
- uint32_t __ret; \
- __ret = (uint32_t) __builtin_neon_vqshls_n_u32(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshld_n_u64(__p0, __p1) __extension__ ({ \
- uint64_t __s0 = __p0; \
- uint64_t __ret; \
- __ret = (uint64_t) __builtin_neon_vqshld_n_u64(__s0, __p1); \
- __ret; \
- })
- #else
- #define vqshld_n_u64(__p0, __p1) __extension__ ({ \
- uint64_t __s0 = __p0; \
- uint64_t __ret; \
- __ret = (uint64_t) __builtin_neon_vqshld_n_u64(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshlh_n_u16(__p0, __p1) __extension__ ({ \
- uint16_t __s0 = __p0; \
- uint16_t __ret; \
- __ret = (uint16_t) __builtin_neon_vqshlh_n_u16(__s0, __p1); \
- __ret; \
- })
- #else
- #define vqshlh_n_u16(__p0, __p1) __extension__ ({ \
- uint16_t __s0 = __p0; \
- uint16_t __ret; \
- __ret = (uint16_t) __builtin_neon_vqshlh_n_u16(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshlb_n_s8(__p0, __p1) __extension__ ({ \
- int8_t __s0 = __p0; \
- int8_t __ret; \
- __ret = (int8_t) __builtin_neon_vqshlb_n_s8(__s0, __p1); \
- __ret; \
- })
- #else
- #define vqshlb_n_s8(__p0, __p1) __extension__ ({ \
- int8_t __s0 = __p0; \
- int8_t __ret; \
- __ret = (int8_t) __builtin_neon_vqshlb_n_s8(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshls_n_s32(__p0, __p1) __extension__ ({ \
- int32_t __s0 = __p0; \
- int32_t __ret; \
- __ret = (int32_t) __builtin_neon_vqshls_n_s32(__s0, __p1); \
- __ret; \
- })
- #else
- #define vqshls_n_s32(__p0, __p1) __extension__ ({ \
- int32_t __s0 = __p0; \
- int32_t __ret; \
- __ret = (int32_t) __builtin_neon_vqshls_n_s32(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshld_n_s64(__p0, __p1) __extension__ ({ \
- int64_t __s0 = __p0; \
- int64_t __ret; \
- __ret = (int64_t) __builtin_neon_vqshld_n_s64(__s0, __p1); \
- __ret; \
- })
- #else
- #define vqshld_n_s64(__p0, __p1) __extension__ ({ \
- int64_t __s0 = __p0; \
- int64_t __ret; \
- __ret = (int64_t) __builtin_neon_vqshld_n_s64(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshlh_n_s16(__p0, __p1) __extension__ ({ \
- int16_t __s0 = __p0; \
- int16_t __ret; \
- __ret = (int16_t) __builtin_neon_vqshlh_n_s16(__s0, __p1); \
- __ret; \
- })
- #else
- #define vqshlh_n_s16(__p0, __p1) __extension__ ({ \
- int16_t __s0 = __p0; \
- int16_t __ret; \
- __ret = (int16_t) __builtin_neon_vqshlh_n_s16(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshlub_n_s8(__p0, __p1) __extension__ ({ \
- int8_t __s0 = __p0; \
- int8_t __ret; \
- __ret = (int8_t) __builtin_neon_vqshlub_n_s8(__s0, __p1); \
- __ret; \
- })
- #else
- #define vqshlub_n_s8(__p0, __p1) __extension__ ({ \
- int8_t __s0 = __p0; \
- int8_t __ret; \
- __ret = (int8_t) __builtin_neon_vqshlub_n_s8(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshlus_n_s32(__p0, __p1) __extension__ ({ \
- int32_t __s0 = __p0; \
- int32_t __ret; \
- __ret = (int32_t) __builtin_neon_vqshlus_n_s32(__s0, __p1); \
- __ret; \
- })
- #else
- #define vqshlus_n_s32(__p0, __p1) __extension__ ({ \
- int32_t __s0 = __p0; \
- int32_t __ret; \
- __ret = (int32_t) __builtin_neon_vqshlus_n_s32(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshlud_n_s64(__p0, __p1) __extension__ ({ \
- int64_t __s0 = __p0; \
- int64_t __ret; \
- __ret = (int64_t) __builtin_neon_vqshlud_n_s64(__s0, __p1); \
- __ret; \
- })
- #else
- #define vqshlud_n_s64(__p0, __p1) __extension__ ({ \
- int64_t __s0 = __p0; \
- int64_t __ret; \
- __ret = (int64_t) __builtin_neon_vqshlud_n_s64(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshluh_n_s16(__p0, __p1) __extension__ ({ \
- int16_t __s0 = __p0; \
- int16_t __ret; \
- __ret = (int16_t) __builtin_neon_vqshluh_n_s16(__s0, __p1); \
- __ret; \
- })
- #else
- #define vqshluh_n_s16(__p0, __p1) __extension__ ({ \
- int16_t __s0 = __p0; \
- int16_t __ret; \
- __ret = (int16_t) __builtin_neon_vqshluh_n_s16(__s0, __p1); \
- __ret; \
- })
- #endif
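- /* vqshrn_high_n_* and vqshrun_high_n_*: non-rounding counterparts of the
-    vqrshrn/vqrshrun "high" narrows above; shifted-out bits are truncated. */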
- #ifdef __LITTLE_ENDIAN__
- #define vqshrn_high_n_u32(__p0_200, __p1_200, __p2_200) __extension__ ({ \
- uint16x4_t __s0_200 = __p0_200; \
- uint32x4_t __s1_200 = __p1_200; \
- uint16x8_t __ret_200; \
- __ret_200 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_200), (uint16x4_t)(vqshrn_n_u32(__s1_200, __p2_200)))); \
- __ret_200; \
- })
- #else
- #define vqshrn_high_n_u32(__p0_201, __p1_201, __p2_201) __extension__ ({ \
- uint16x4_t __s0_201 = __p0_201; \
- uint32x4_t __s1_201 = __p1_201; \
- uint16x4_t __rev0_201; __rev0_201 = __builtin_shufflevector(__s0_201, __s0_201, 3, 2, 1, 0); \
- uint32x4_t __rev1_201; __rev1_201 = __builtin_shufflevector(__s1_201, __s1_201, 3, 2, 1, 0); \
- uint16x8_t __ret_201; \
- __ret_201 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_201), (uint16x4_t)(__noswap_vqshrn_n_u32(__rev1_201, __p2_201)))); \
- __ret_201 = __builtin_shufflevector(__ret_201, __ret_201, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_201; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshrn_high_n_u64(__p0_202, __p1_202, __p2_202) __extension__ ({ \
- uint32x2_t __s0_202 = __p0_202; \
- uint64x2_t __s1_202 = __p1_202; \
- uint32x4_t __ret_202; \
- __ret_202 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_202), (uint32x2_t)(vqshrn_n_u64(__s1_202, __p2_202)))); \
- __ret_202; \
- })
- #else
- #define vqshrn_high_n_u64(__p0_203, __p1_203, __p2_203) __extension__ ({ \
- uint32x2_t __s0_203 = __p0_203; \
- uint64x2_t __s1_203 = __p1_203; \
- uint32x2_t __rev0_203; __rev0_203 = __builtin_shufflevector(__s0_203, __s0_203, 1, 0); \
- uint64x2_t __rev1_203; __rev1_203 = __builtin_shufflevector(__s1_203, __s1_203, 1, 0); \
- uint32x4_t __ret_203; \
- __ret_203 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_203), (uint32x2_t)(__noswap_vqshrn_n_u64(__rev1_203, __p2_203)))); \
- __ret_203 = __builtin_shufflevector(__ret_203, __ret_203, 3, 2, 1, 0); \
- __ret_203; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshrn_high_n_u16(__p0_204, __p1_204, __p2_204) __extension__ ({ \
- uint8x8_t __s0_204 = __p0_204; \
- uint16x8_t __s1_204 = __p1_204; \
- uint8x16_t __ret_204; \
- __ret_204 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_204), (uint8x8_t)(vqshrn_n_u16(__s1_204, __p2_204)))); \
- __ret_204; \
- })
- #else
- #define vqshrn_high_n_u16(__p0_205, __p1_205, __p2_205) __extension__ ({ \
- uint8x8_t __s0_205 = __p0_205; \
- uint16x8_t __s1_205 = __p1_205; \
- uint8x8_t __rev0_205; __rev0_205 = __builtin_shufflevector(__s0_205, __s0_205, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __rev1_205; __rev1_205 = __builtin_shufflevector(__s1_205, __s1_205, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x16_t __ret_205; \
- __ret_205 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_205), (uint8x8_t)(__noswap_vqshrn_n_u16(__rev1_205, __p2_205)))); \
- __ret_205 = __builtin_shufflevector(__ret_205, __ret_205, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_205; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshrn_high_n_s32(__p0_206, __p1_206, __p2_206) __extension__ ({ \
- int16x4_t __s0_206 = __p0_206; \
- int32x4_t __s1_206 = __p1_206; \
- int16x8_t __ret_206; \
- __ret_206 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_206), (int16x4_t)(vqshrn_n_s32(__s1_206, __p2_206)))); \
- __ret_206; \
- })
- #else
- #define vqshrn_high_n_s32(__p0_207, __p1_207, __p2_207) __extension__ ({ \
- int16x4_t __s0_207 = __p0_207; \
- int32x4_t __s1_207 = __p1_207; \
- int16x4_t __rev0_207; __rev0_207 = __builtin_shufflevector(__s0_207, __s0_207, 3, 2, 1, 0); \
- int32x4_t __rev1_207; __rev1_207 = __builtin_shufflevector(__s1_207, __s1_207, 3, 2, 1, 0); \
- int16x8_t __ret_207; \
- __ret_207 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_207), (int16x4_t)(__noswap_vqshrn_n_s32(__rev1_207, __p2_207)))); \
- __ret_207 = __builtin_shufflevector(__ret_207, __ret_207, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_207; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshrn_high_n_s64(__p0_208, __p1_208, __p2_208) __extension__ ({ \
- int32x2_t __s0_208 = __p0_208; \
- int64x2_t __s1_208 = __p1_208; \
- int32x4_t __ret_208; \
- __ret_208 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_208), (int32x2_t)(vqshrn_n_s64(__s1_208, __p2_208)))); \
- __ret_208; \
- })
- #else
- #define vqshrn_high_n_s64(__p0_209, __p1_209, __p2_209) __extension__ ({ \
- int32x2_t __s0_209 = __p0_209; \
- int64x2_t __s1_209 = __p1_209; \
- int32x2_t __rev0_209; __rev0_209 = __builtin_shufflevector(__s0_209, __s0_209, 1, 0); \
- int64x2_t __rev1_209; __rev1_209 = __builtin_shufflevector(__s1_209, __s1_209, 1, 0); \
- int32x4_t __ret_209; \
- __ret_209 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_209), (int32x2_t)(__noswap_vqshrn_n_s64(__rev1_209, __p2_209)))); \
- __ret_209 = __builtin_shufflevector(__ret_209, __ret_209, 3, 2, 1, 0); \
- __ret_209; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshrn_high_n_s16(__p0_210, __p1_210, __p2_210) __extension__ ({ \
- int8x8_t __s0_210 = __p0_210; \
- int16x8_t __s1_210 = __p1_210; \
- int8x16_t __ret_210; \
- __ret_210 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_210), (int8x8_t)(vqshrn_n_s16(__s1_210, __p2_210)))); \
- __ret_210; \
- })
- #else
- #define vqshrn_high_n_s16(__p0_211, __p1_211, __p2_211) __extension__ ({ \
- int8x8_t __s0_211 = __p0_211; \
- int16x8_t __s1_211 = __p1_211; \
- int8x8_t __rev0_211; __rev0_211 = __builtin_shufflevector(__s0_211, __s0_211, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __rev1_211; __rev1_211 = __builtin_shufflevector(__s1_211, __s1_211, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x16_t __ret_211; \
- __ret_211 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_211), (int8x8_t)(__noswap_vqshrn_n_s16(__rev1_211, __p2_211)))); \
- __ret_211 = __builtin_shufflevector(__ret_211, __ret_211, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_211; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshrns_n_u32(__p0, __p1) __extension__ ({ \
- uint32_t __s0 = __p0; \
- uint16_t __ret; \
- __ret = (uint16_t) __builtin_neon_vqshrns_n_u32(__s0, __p1); \
- __ret; \
- })
- #else
- #define vqshrns_n_u32(__p0, __p1) __extension__ ({ \
- uint32_t __s0 = __p0; \
- uint16_t __ret; \
- __ret = (uint16_t) __builtin_neon_vqshrns_n_u32(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshrnd_n_u64(__p0, __p1) __extension__ ({ \
- uint64_t __s0 = __p0; \
- uint32_t __ret; \
- __ret = (uint32_t) __builtin_neon_vqshrnd_n_u64(__s0, __p1); \
- __ret; \
- })
- #else
- #define vqshrnd_n_u64(__p0, __p1) __extension__ ({ \
- uint64_t __s0 = __p0; \
- uint32_t __ret; \
- __ret = (uint32_t) __builtin_neon_vqshrnd_n_u64(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshrnh_n_u16(__p0, __p1) __extension__ ({ \
- uint16_t __s0 = __p0; \
- uint8_t __ret; \
- __ret = (uint8_t) __builtin_neon_vqshrnh_n_u16(__s0, __p1); \
- __ret; \
- })
- #else
- #define vqshrnh_n_u16(__p0, __p1) __extension__ ({ \
- uint16_t __s0 = __p0; \
- uint8_t __ret; \
- __ret = (uint8_t) __builtin_neon_vqshrnh_n_u16(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshrns_n_s32(__p0, __p1) __extension__ ({ \
- int32_t __s0 = __p0; \
- int16_t __ret; \
- __ret = (int16_t) __builtin_neon_vqshrns_n_s32(__s0, __p1); \
- __ret; \
- })
- #else
- #define vqshrns_n_s32(__p0, __p1) __extension__ ({ \
- int32_t __s0 = __p0; \
- int16_t __ret; \
- __ret = (int16_t) __builtin_neon_vqshrns_n_s32(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshrnd_n_s64(__p0, __p1) __extension__ ({ \
- int64_t __s0 = __p0; \
- int32_t __ret; \
- __ret = (int32_t) __builtin_neon_vqshrnd_n_s64(__s0, __p1); \
- __ret; \
- })
- #else
- #define vqshrnd_n_s64(__p0, __p1) __extension__ ({ \
- int64_t __s0 = __p0; \
- int32_t __ret; \
- __ret = (int32_t) __builtin_neon_vqshrnd_n_s64(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshrnh_n_s16(__p0, __p1) __extension__ ({ \
- int16_t __s0 = __p0; \
- int8_t __ret; \
- __ret = (int8_t) __builtin_neon_vqshrnh_n_s16(__s0, __p1); \
- __ret; \
- })
- #else
- #define vqshrnh_n_s16(__p0, __p1) __extension__ ({ \
- int16_t __s0 = __p0; \
- int8_t __ret; \
- __ret = (int8_t) __builtin_neon_vqshrnh_n_s16(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshrun_high_n_s32(__p0_212, __p1_212, __p2_212) __extension__ ({ \
- int16x4_t __s0_212 = __p0_212; \
- int32x4_t __s1_212 = __p1_212; \
- int16x8_t __ret_212; \
- __ret_212 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_212), (int16x4_t)(vqshrun_n_s32(__s1_212, __p2_212)))); \
- __ret_212; \
- })
- #else
- #define vqshrun_high_n_s32(__p0_213, __p1_213, __p2_213) __extension__ ({ \
- int16x4_t __s0_213 = __p0_213; \
- int32x4_t __s1_213 = __p1_213; \
- int16x4_t __rev0_213; __rev0_213 = __builtin_shufflevector(__s0_213, __s0_213, 3, 2, 1, 0); \
- int32x4_t __rev1_213; __rev1_213 = __builtin_shufflevector(__s1_213, __s1_213, 3, 2, 1, 0); \
- int16x8_t __ret_213; \
- __ret_213 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_213), (int16x4_t)(__noswap_vqshrun_n_s32(__rev1_213, __p2_213)))); \
- __ret_213 = __builtin_shufflevector(__ret_213, __ret_213, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_213; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshrun_high_n_s64(__p0_214, __p1_214, __p2_214) __extension__ ({ \
- int32x2_t __s0_214 = __p0_214; \
- int64x2_t __s1_214 = __p1_214; \
- int32x4_t __ret_214; \
- __ret_214 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_214), (int32x2_t)(vqshrun_n_s64(__s1_214, __p2_214)))); \
- __ret_214; \
- })
- #else
- #define vqshrun_high_n_s64(__p0_215, __p1_215, __p2_215) __extension__ ({ \
- int32x2_t __s0_215 = __p0_215; \
- int64x2_t __s1_215 = __p1_215; \
- int32x2_t __rev0_215; __rev0_215 = __builtin_shufflevector(__s0_215, __s0_215, 1, 0); \
- int64x2_t __rev1_215; __rev1_215 = __builtin_shufflevector(__s1_215, __s1_215, 1, 0); \
- int32x4_t __ret_215; \
- __ret_215 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_215), (int32x2_t)(__noswap_vqshrun_n_s64(__rev1_215, __p2_215)))); \
- __ret_215 = __builtin_shufflevector(__ret_215, __ret_215, 3, 2, 1, 0); \
- __ret_215; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshrun_high_n_s16(__p0_216, __p1_216, __p2_216) __extension__ ({ \
- int8x8_t __s0_216 = __p0_216; \
- int16x8_t __s1_216 = __p1_216; \
- int8x16_t __ret_216; \
- __ret_216 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_216), (int8x8_t)(vqshrun_n_s16(__s1_216, __p2_216)))); \
- __ret_216; \
- })
- #else
- #define vqshrun_high_n_s16(__p0_217, __p1_217, __p2_217) __extension__ ({ \
- int8x8_t __s0_217 = __p0_217; \
- int16x8_t __s1_217 = __p1_217; \
- int8x8_t __rev0_217; __rev0_217 = __builtin_shufflevector(__s0_217, __s0_217, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __rev1_217; __rev1_217 = __builtin_shufflevector(__s1_217, __s1_217, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x16_t __ret_217; \
- __ret_217 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_217), (int8x8_t)(__noswap_vqshrun_n_s16(__rev1_217, __p2_217)))); \
- __ret_217 = __builtin_shufflevector(__ret_217, __ret_217, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_217; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshruns_n_s32(__p0, __p1) __extension__ ({ \
- int32_t __s0 = __p0; \
- int16_t __ret; \
- __ret = (int16_t) __builtin_neon_vqshruns_n_s32(__s0, __p1); \
- __ret; \
- })
- #else
- #define vqshruns_n_s32(__p0, __p1) __extension__ ({ \
- int32_t __s0 = __p0; \
- int16_t __ret; \
- __ret = (int16_t) __builtin_neon_vqshruns_n_s32(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshrund_n_s64(__p0, __p1) __extension__ ({ \
- int64_t __s0 = __p0; \
- int32_t __ret; \
- __ret = (int32_t) __builtin_neon_vqshrund_n_s64(__s0, __p1); \
- __ret; \
- })
- #else
- #define vqshrund_n_s64(__p0, __p1) __extension__ ({ \
- int64_t __s0 = __p0; \
- int32_t __ret; \
- __ret = (int32_t) __builtin_neon_vqshrund_n_s64(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqshrunh_n_s16(__p0, __p1) __extension__ ({ \
- int16_t __s0 = __p0; \
- int8_t __ret; \
- __ret = (int8_t) __builtin_neon_vqshrunh_n_s16(__s0, __p1); \
- __ret; \
- })
- #else
- #define vqshrunh_n_s16(__p0, __p1) __extension__ ({ \
- int16_t __s0 = __p0; \
- int8_t __ret; \
- __ret = (int8_t) __builtin_neon_vqshrunh_n_s16(__s0, __p1); \
- __ret; \
- })
- #endif
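- /* vqsub*: scalar saturating subtract, e.g. vqsubb_u8(3, 5) yields 0 and
-    vqsubb_s8(-100, 100) yields -128 (INT8_MIN). */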
- #ifdef __LITTLE_ENDIAN__
- __ai uint8_t vqsubb_u8(uint8_t __p0, uint8_t __p1) {
- uint8_t __ret;
- __ret = (uint8_t) __builtin_neon_vqsubb_u8(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint8_t vqsubb_u8(uint8_t __p0, uint8_t __p1) {
- uint8_t __ret;
- __ret = (uint8_t) __builtin_neon_vqsubb_u8(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32_t vqsubs_u32(uint32_t __p0, uint32_t __p1) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vqsubs_u32(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint32_t vqsubs_u32(uint32_t __p0, uint32_t __p1) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vqsubs_u32(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64_t vqsubd_u64(uint64_t __p0, uint64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vqsubd_u64(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint64_t vqsubd_u64(uint64_t __p0, uint64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vqsubd_u64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16_t vqsubh_u16(uint16_t __p0, uint16_t __p1) {
- uint16_t __ret;
- __ret = (uint16_t) __builtin_neon_vqsubh_u16(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint16_t vqsubh_u16(uint16_t __p0, uint16_t __p1) {
- uint16_t __ret;
- __ret = (uint16_t) __builtin_neon_vqsubh_u16(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8_t vqsubb_s8(int8_t __p0, int8_t __p1) {
- int8_t __ret;
- __ret = (int8_t) __builtin_neon_vqsubb_s8(__p0, __p1);
- return __ret;
- }
- #else
- __ai int8_t vqsubb_s8(int8_t __p0, int8_t __p1) {
- int8_t __ret;
- __ret = (int8_t) __builtin_neon_vqsubb_s8(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32_t vqsubs_s32(int32_t __p0, int32_t __p1) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vqsubs_s32(__p0, __p1);
- return __ret;
- }
- #else
- __ai int32_t vqsubs_s32(int32_t __p0, int32_t __p1) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vqsubs_s32(__p0, __p1);
- return __ret;
- }
- __ai int32_t __noswap_vqsubs_s32(int32_t __p0, int32_t __p1) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vqsubs_s32(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64_t vqsubd_s64(int64_t __p0, int64_t __p1) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vqsubd_s64(__p0, __p1);
- return __ret;
- }
- #else
- __ai int64_t vqsubd_s64(int64_t __p0, int64_t __p1) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vqsubd_s64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16_t vqsubh_s16(int16_t __p0, int16_t __p1) {
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vqsubh_s16(__p0, __p1);
- return __ret;
- }
- #else
- __ai int16_t vqsubh_s16(int16_t __p0, int16_t __p1) {
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vqsubh_s16(__p0, __p1);
- return __ret;
- }
- __ai int16_t __noswap_vqsubh_s16(int16_t __p0, int16_t __p1) {
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vqsubh_s16(__p0, __p1);
- return __ret;
- }
- #endif
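- /* vqtbl1*: table lookup into one 128-bit table __p0; each index byte of __p1
-    selects a byte of the table, and indices >= 16 produce 0. */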
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) {
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 4);
- return __ret;
- }
- #else
- __ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) {
- poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 4);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) {
- poly8x16_t __ret;
- __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 36);
- return __ret;
- }
- #else
- __ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) {
- poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __ret;
- __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vqtbl1q_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
- return __ret;
- }
- #else
- __ai int8x16_t vqtbl1q_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vqtbl1_s8(int8x16_t __p0, int8x8_t __p1) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vqtbl1_s8(int8x16_t __p0, int8x8_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
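For context on the vqtbl1 family removed above: each function performs a byte-wise table lookup (one 128-bit table, one vector of per-lane indices), and the big-endian variants only differ by reversing lane order with __builtin_shufflevector before and after the builtin call. A minimal usage sketch, assuming an AArch64 target where <arm_neon.h> provides these intrinsics; the helper name reverse_bytes16 is illustrative and not part of the header:

#include <arm_neon.h>
#include <stdint.h>

/* Illustrative only: reverse the 16 bytes of a vector with a single table lookup.
   Indices 0..15 select bytes from the table; an out-of-range index yields 0. */
static inline uint8x16_t reverse_bytes16(uint8x16_t data) {
    static const uint8_t rev_idx[16] = {15, 14, 13, 12, 11, 10, 9, 8,
                                         7,  6,  5,  4,  3,  2, 1, 0};
    uint8x16_t idx = vld1q_u8(rev_idx);   /* load the index vector */
    return vqtbl1q_u8(data, idx);         /* per-byte lookup into 'data' */
}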
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) {
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 4);
- return __ret;
- }
- #else
- __ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) {
- poly8x16x2_t __rev0;
- __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 4);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) {
- poly8x16_t __ret;
- __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 36);
- return __ret;
- }
- #else
- __ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) {
- poly8x16x2_t __rev0;
- __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __ret;
- __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 36);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) {
- uint8x16x2_t __rev0;
- __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, int8x16_t __p1) {
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 32);
- return __ret;
- }
- #else
- __ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, int8x16_t __p1) {
- int8x16x2_t __rev0;
- __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 32);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) {
- uint8x16x2_t __rev0;
- __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, int8x8_t __p1) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, int8x8_t __p1) {
- int8x16x2_t __rev0;
- __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) {
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 4);
- return __ret;
- }
- #else
- __ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) {
- poly8x16x3_t __rev0;
- __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 4);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) {
- poly8x16_t __ret;
- __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 36);
- return __ret;
- }
- #else
- __ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) {
- poly8x16x3_t __rev0;
- __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __ret;
- __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 36);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) {
- uint8x16x3_t __rev0;
- __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, int8x16_t __p1) {
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 32);
- return __ret;
- }
- #else
- __ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, int8x16_t __p1) {
- int8x16x3_t __rev0;
- __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 32);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) {
- uint8x16x3_t __rev0;
- __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, int8x8_t __p1) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, int8x8_t __p1) {
- int8x16x3_t __rev0;
- __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) {
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 4);
- return __ret;
- }
- #else
- __ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) {
- poly8x16x4_t __rev0;
- __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 4);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) {
- poly8x16_t __ret;
- __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 36);
- return __ret;
- }
- #else
- __ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) {
- poly8x16x4_t __rev0;
- __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __ret;
- __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 36);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) {
- uint8x16x4_t __rev0;
- __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, int8x16_t __p1) {
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 32);
- return __ret;
- }
- #else
- __ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, int8x16_t __p1) {
- int8x16x4_t __rev0;
- __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 32);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) {
- uint8x16x4_t __rev0;
- __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, int8x8_t __p1) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, int8x8_t __p1) {
- int8x16x4_t __rev0;
- __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) {
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 4);
- return __ret;
- }
- #else
- __ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) {
- poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 4);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) {
- poly8x16_t __ret;
- __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36);
- return __ret;
- }
- #else
- __ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) {
- poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __ret;
- __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32);
- return __ret;
- }
- #else
- __ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, int8x8_t __p2) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, int8x8_t __p2) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) {
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 4);
- return __ret;
- }
- #else
- __ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) {
- poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16x2_t __rev1;
- __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 4);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) {
- poly8x16_t __ret;
- __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 36);
- return __ret;
- }
- #else
- __ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) {
- poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16x2_t __rev1;
- __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __ret;
- __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 36);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16x2_t __rev1;
- __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, int8x16_t __p2) {
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 32);
- return __ret;
- }
- #else
- __ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, int8x16_t __p2) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16x2_t __rev1;
- __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 32);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16x2_t __rev1;
- __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, int8x8_t __p2) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, int8x8_t __p2) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16x2_t __rev1;
- __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) {
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 4);
- return __ret;
- }
- #else
- __ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) {
- poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16x3_t __rev1;
- __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 4);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) {
- poly8x16_t __ret;
- __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 36);
- return __ret;
- }
- #else
- __ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) {
- poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16x3_t __rev1;
- __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __ret;
- __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 36);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16x3_t __rev1;
- __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, int8x16_t __p2) {
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 32);
- return __ret;
- }
- #else
- __ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, int8x16_t __p2) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16x3_t __rev1;
- __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 32);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16x3_t __rev1;
- __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, int8x8_t __p2) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, int8x8_t __p2) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16x3_t __rev1;
- __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) {
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 4);
- return __ret;
- }
- #else
- __ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) {
- poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16x4_t __rev1;
- __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 4);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) {
- poly8x16_t __ret;
- __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 36);
- return __ret;
- }
- #else
- __ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) {
- poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16x4_t __rev1;
- __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __ret;
- __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 36);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16x4_t __rev1;
- __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, int8x16_t __p2) {
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 32);
- return __ret;
- }
- #else
- __ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, int8x16_t __p2) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16x4_t __rev1;
- __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 32);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16x4_t __rev1;
- __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, int8x8_t __p2) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, int8x8_t __p2) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16x4_t __rev1;
- __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
- uint16x8_t __ret;
- __ret = vcombine_u16(__p0, vraddhn_u32(__p1, __p2));
- return __ret;
- }
- #else
- __ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __noswap_vcombine_u16(__rev0, __noswap_vraddhn_u32(__rev1, __rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
- uint32x4_t __ret;
- __ret = vcombine_u32(__p0, vraddhn_u64(__p1, __p2));
- return __ret;
- }
- #else
- __ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
- uint32x4_t __ret;
- __ret = __noswap_vcombine_u32(__rev0, __noswap_vraddhn_u64(__rev1, __rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
- uint8x16_t __ret;
- __ret = vcombine_u8(__p0, vraddhn_u16(__p1, __p2));
- return __ret;
- }
- #else
- __ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = __noswap_vcombine_u8(__rev0, __noswap_vraddhn_u16(__rev1, __rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
- int16x8_t __ret;
- __ret = vcombine_s16(__p0, vraddhn_s32(__p1, __p2));
- return __ret;
- }
- #else
- __ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __noswap_vcombine_s16(__rev0, __noswap_vraddhn_s32(__rev1, __rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
- int32x4_t __ret;
- __ret = vcombine_s32(__p0, vraddhn_s64(__p1, __p2));
- return __ret;
- }
- #else
- __ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
- int32x4_t __ret;
- __ret = __noswap_vcombine_s32(__rev0, __noswap_vraddhn_s64(__rev1, __rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
- int8x16_t __ret;
- __ret = vcombine_s8(__p0, vraddhn_s16(__p1, __p2));
- return __ret;
- }
- #else
- __ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = __noswap_vcombine_s8(__rev0, __noswap_vraddhn_s16(__rev1, __rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vrbit_p8(poly8x8_t __p0) {
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 4);
- return __ret;
- }
- #else
- __ai poly8x8_t vrbit_p8(poly8x8_t __p0) {
- poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __ret;
- __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 4);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vrbitq_p8(poly8x16_t __p0) {
- poly8x16_t __ret;
- __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 36);
- return __ret;
- }
- #else
- __ai poly8x16_t vrbitq_p8(poly8x16_t __p0) {
- poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __ret;
- __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 36);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vrbitq_u8(uint8x16_t __p0) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vrbitq_u8(uint8x16_t __p0) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vrbitq_s8(int8x16_t __p0) {
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 32);
- return __ret;
- }
- #else
- __ai int8x16_t vrbitq_s8(int8x16_t __p0) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 32);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vrbit_u8(uint8x8_t __p0) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vrbit_u8(uint8x8_t __p0) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vrbit_s8(int8x8_t __p0) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vrbit_s8(int8x8_t __p0) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
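- // vrbit_<type>/vrbitq_<type> reverse the order of the bits within each byte
- // (the RBIT operation applied per 8-bit lane), e.g. 0xB0 -> 0x0D.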
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vrecpeq_f64(float64x2_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 42);
- return __ret;
- }
- #else
- __ai float64x2_t vrecpeq_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 42);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vrecpe_f64(float64x1_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 10);
- return __ret;
- }
- #else
- __ai float64x1_t vrecpe_f64(float64x1_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 10);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64_t vrecped_f64(float64_t __p0) {
- float64_t __ret;
- __ret = (float64_t) __builtin_neon_vrecped_f64(__p0);
- return __ret;
- }
- #else
- __ai float64_t vrecped_f64(float64_t __p0) {
- float64_t __ret;
- __ret = (float64_t) __builtin_neon_vrecped_f64(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32_t vrecpes_f32(float32_t __p0) {
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vrecpes_f32(__p0);
- return __ret;
- }
- #else
- __ai float32_t vrecpes_f32(float32_t __p0) {
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vrecpes_f32(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
- return __ret;
- }
- #else
- __ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vrecps_f64(float64x1_t __p0, float64x1_t __p1) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
- return __ret;
- }
- #else
- __ai float64x1_t vrecps_f64(float64x1_t __p0, float64x1_t __p1) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64_t vrecpsd_f64(float64_t __p0, float64_t __p1) {
- float64_t __ret;
- __ret = (float64_t) __builtin_neon_vrecpsd_f64(__p0, __p1);
- return __ret;
- }
- #else
- __ai float64_t vrecpsd_f64(float64_t __p0, float64_t __p1) {
- float64_t __ret;
- __ret = (float64_t) __builtin_neon_vrecpsd_f64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32_t vrecpss_f32(float32_t __p0, float32_t __p1) {
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vrecpss_f32(__p0, __p1);
- return __ret;
- }
- #else
- __ai float32_t vrecpss_f32(float32_t __p0, float32_t __p1) {
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vrecpss_f32(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64_t vrecpxd_f64(float64_t __p0) {
- float64_t __ret;
- __ret = (float64_t) __builtin_neon_vrecpxd_f64(__p0);
- return __ret;
- }
- #else
- __ai float64_t vrecpxd_f64(float64_t __p0) {
- float64_t __ret;
- __ret = (float64_t) __builtin_neon_vrecpxd_f64(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32_t vrecpxs_f32(float32_t __p0) {
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vrecpxs_f32(__p0);
- return __ret;
- }
- #else
- __ai float32_t vrecpxs_f32(float32_t __p0) {
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vrecpxs_f32(__p0);
- return __ret;
- }
- #endif
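- // vrecpe* give a reciprocal estimate (FRECPE), vrecps* compute the
- // Newton-Raphson step 2.0 - a*b (FRECPS), and vrecpx* return the reciprocal
- // exponent (FRECPX). A minimal refinement sketch using the scalar f32 forms
- // defined above (illustrative only; the function name is hypothetical):
- __ai float32_t __example_recip_f32(float32_t __d) {
-   float32_t __e = vrecpes_f32(__d);
-   __e = __e * vrecpss_f32(__d, __e); // each step roughly doubles the number of accurate bits
-   __e = __e * vrecpss_f32(__d, __e);
-   return __e;
- }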
- #ifdef __LITTLE_ENDIAN__
- __ai uint64_t vrshld_u64(uint64_t __p0, uint64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vrshld_u64(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint64_t vrshld_u64(uint64_t __p0, uint64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vrshld_u64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64_t vrshld_s64(int64_t __p0, int64_t __p1) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vrshld_s64(__p0, __p1);
- return __ret;
- }
- #else
- __ai int64_t vrshld_s64(int64_t __p0, int64_t __p1) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vrshld_s64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrshrd_n_u64(__p0, __p1) __extension__ ({ \
- uint64_t __s0 = __p0; \
- uint64_t __ret; \
- __ret = (uint64_t) __builtin_neon_vrshrd_n_u64(__s0, __p1); \
- __ret; \
- })
- #else
- #define vrshrd_n_u64(__p0, __p1) __extension__ ({ \
- uint64_t __s0 = __p0; \
- uint64_t __ret; \
- __ret = (uint64_t) __builtin_neon_vrshrd_n_u64(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrshrd_n_s64(__p0, __p1) __extension__ ({ \
- int64_t __s0 = __p0; \
- int64_t __ret; \
- __ret = (int64_t) __builtin_neon_vrshrd_n_s64(__s0, __p1); \
- __ret; \
- })
- #else
- #define vrshrd_n_s64(__p0, __p1) __extension__ ({ \
- int64_t __s0 = __p0; \
- int64_t __ret; \
- __ret = (int64_t) __builtin_neon_vrshrd_n_s64(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrshrn_high_n_u32(__p0_218, __p1_218, __p2_218) __extension__ ({ \
- uint16x4_t __s0_218 = __p0_218; \
- uint32x4_t __s1_218 = __p1_218; \
- uint16x8_t __ret_218; \
- __ret_218 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_218), (uint16x4_t)(vrshrn_n_u32(__s1_218, __p2_218)))); \
- __ret_218; \
- })
- #else
- #define vrshrn_high_n_u32(__p0_219, __p1_219, __p2_219) __extension__ ({ \
- uint16x4_t __s0_219 = __p0_219; \
- uint32x4_t __s1_219 = __p1_219; \
- uint16x4_t __rev0_219; __rev0_219 = __builtin_shufflevector(__s0_219, __s0_219, 3, 2, 1, 0); \
- uint32x4_t __rev1_219; __rev1_219 = __builtin_shufflevector(__s1_219, __s1_219, 3, 2, 1, 0); \
- uint16x8_t __ret_219; \
- __ret_219 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_219), (uint16x4_t)(__noswap_vrshrn_n_u32(__rev1_219, __p2_219)))); \
- __ret_219 = __builtin_shufflevector(__ret_219, __ret_219, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_219; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrshrn_high_n_u64(__p0_220, __p1_220, __p2_220) __extension__ ({ \
- uint32x2_t __s0_220 = __p0_220; \
- uint64x2_t __s1_220 = __p1_220; \
- uint32x4_t __ret_220; \
- __ret_220 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_220), (uint32x2_t)(vrshrn_n_u64(__s1_220, __p2_220)))); \
- __ret_220; \
- })
- #else
- #define vrshrn_high_n_u64(__p0_221, __p1_221, __p2_221) __extension__ ({ \
- uint32x2_t __s0_221 = __p0_221; \
- uint64x2_t __s1_221 = __p1_221; \
- uint32x2_t __rev0_221; __rev0_221 = __builtin_shufflevector(__s0_221, __s0_221, 1, 0); \
- uint64x2_t __rev1_221; __rev1_221 = __builtin_shufflevector(__s1_221, __s1_221, 1, 0); \
- uint32x4_t __ret_221; \
- __ret_221 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_221), (uint32x2_t)(__noswap_vrshrn_n_u64(__rev1_221, __p2_221)))); \
- __ret_221 = __builtin_shufflevector(__ret_221, __ret_221, 3, 2, 1, 0); \
- __ret_221; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrshrn_high_n_u16(__p0_222, __p1_222, __p2_222) __extension__ ({ \
- uint8x8_t __s0_222 = __p0_222; \
- uint16x8_t __s1_222 = __p1_222; \
- uint8x16_t __ret_222; \
- __ret_222 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_222), (uint8x8_t)(vrshrn_n_u16(__s1_222, __p2_222)))); \
- __ret_222; \
- })
- #else
- #define vrshrn_high_n_u16(__p0_223, __p1_223, __p2_223) __extension__ ({ \
- uint8x8_t __s0_223 = __p0_223; \
- uint16x8_t __s1_223 = __p1_223; \
- uint8x8_t __rev0_223; __rev0_223 = __builtin_shufflevector(__s0_223, __s0_223, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __rev1_223; __rev1_223 = __builtin_shufflevector(__s1_223, __s1_223, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x16_t __ret_223; \
- __ret_223 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_223), (uint8x8_t)(__noswap_vrshrn_n_u16(__rev1_223, __p2_223)))); \
- __ret_223 = __builtin_shufflevector(__ret_223, __ret_223, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_223; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrshrn_high_n_s32(__p0_224, __p1_224, __p2_224) __extension__ ({ \
- int16x4_t __s0_224 = __p0_224; \
- int32x4_t __s1_224 = __p1_224; \
- int16x8_t __ret_224; \
- __ret_224 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_224), (int16x4_t)(vrshrn_n_s32(__s1_224, __p2_224)))); \
- __ret_224; \
- })
- #else
- #define vrshrn_high_n_s32(__p0_225, __p1_225, __p2_225) __extension__ ({ \
- int16x4_t __s0_225 = __p0_225; \
- int32x4_t __s1_225 = __p1_225; \
- int16x4_t __rev0_225; __rev0_225 = __builtin_shufflevector(__s0_225, __s0_225, 3, 2, 1, 0); \
- int32x4_t __rev1_225; __rev1_225 = __builtin_shufflevector(__s1_225, __s1_225, 3, 2, 1, 0); \
- int16x8_t __ret_225; \
- __ret_225 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_225), (int16x4_t)(__noswap_vrshrn_n_s32(__rev1_225, __p2_225)))); \
- __ret_225 = __builtin_shufflevector(__ret_225, __ret_225, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_225; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrshrn_high_n_s64(__p0_226, __p1_226, __p2_226) __extension__ ({ \
- int32x2_t __s0_226 = __p0_226; \
- int64x2_t __s1_226 = __p1_226; \
- int32x4_t __ret_226; \
- __ret_226 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_226), (int32x2_t)(vrshrn_n_s64(__s1_226, __p2_226)))); \
- __ret_226; \
- })
- #else
- #define vrshrn_high_n_s64(__p0_227, __p1_227, __p2_227) __extension__ ({ \
- int32x2_t __s0_227 = __p0_227; \
- int64x2_t __s1_227 = __p1_227; \
- int32x2_t __rev0_227; __rev0_227 = __builtin_shufflevector(__s0_227, __s0_227, 1, 0); \
- int64x2_t __rev1_227; __rev1_227 = __builtin_shufflevector(__s1_227, __s1_227, 1, 0); \
- int32x4_t __ret_227; \
- __ret_227 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_227), (int32x2_t)(__noswap_vrshrn_n_s64(__rev1_227, __p2_227)))); \
- __ret_227 = __builtin_shufflevector(__ret_227, __ret_227, 3, 2, 1, 0); \
- __ret_227; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrshrn_high_n_s16(__p0_228, __p1_228, __p2_228) __extension__ ({ \
- int8x8_t __s0_228 = __p0_228; \
- int16x8_t __s1_228 = __p1_228; \
- int8x16_t __ret_228; \
- __ret_228 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_228), (int8x8_t)(vrshrn_n_s16(__s1_228, __p2_228)))); \
- __ret_228; \
- })
- #else
- #define vrshrn_high_n_s16(__p0_229, __p1_229, __p2_229) __extension__ ({ \
- int8x8_t __s0_229 = __p0_229; \
- int16x8_t __s1_229 = __p1_229; \
- int8x8_t __rev0_229; __rev0_229 = __builtin_shufflevector(__s0_229, __s0_229, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __rev1_229; __rev1_229 = __builtin_shufflevector(__s1_229, __s1_229, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x16_t __ret_229; \
- __ret_229 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_229), (int8x8_t)(__noswap_vrshrn_n_s16(__rev1_229, __p2_229)))); \
- __ret_229 = __builtin_shufflevector(__ret_229, __ret_229, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_229; \
- })
- #endif
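- // vrshld_*/vrshrd_n_* above are the 64-bit scalar rounding shifts (left by a
- // signed register amount, right by an immediate). vrshrn_high_n_<type>(r, a, n)
- // rounds a right by n, narrows it to half the element width, and packs it into
- // the upper half of the result, keeping r as the lower half.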
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vrsqrteq_f64(float64x2_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 42);
- return __ret;
- }
- #else
- __ai float64x2_t vrsqrteq_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 42);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vrsqrte_f64(float64x1_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 10);
- return __ret;
- }
- #else
- __ai float64x1_t vrsqrte_f64(float64x1_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 10);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64_t vrsqrted_f64(float64_t __p0) {
- float64_t __ret;
- __ret = (float64_t) __builtin_neon_vrsqrted_f64(__p0);
- return __ret;
- }
- #else
- __ai float64_t vrsqrted_f64(float64_t __p0) {
- float64_t __ret;
- __ret = (float64_t) __builtin_neon_vrsqrted_f64(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32_t vrsqrtes_f32(float32_t __p0) {
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vrsqrtes_f32(__p0);
- return __ret;
- }
- #else
- __ai float32_t vrsqrtes_f32(float32_t __p0) {
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vrsqrtes_f32(__p0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
- return __ret;
- }
- #else
- __ai float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vrsqrts_f64(float64x1_t __p0, float64x1_t __p1) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
- return __ret;
- }
- #else
- __ai float64x1_t vrsqrts_f64(float64x1_t __p0, float64x1_t __p1) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64_t vrsqrtsd_f64(float64_t __p0, float64_t __p1) {
- float64_t __ret;
- __ret = (float64_t) __builtin_neon_vrsqrtsd_f64(__p0, __p1);
- return __ret;
- }
- #else
- __ai float64_t vrsqrtsd_f64(float64_t __p0, float64_t __p1) {
- float64_t __ret;
- __ret = (float64_t) __builtin_neon_vrsqrtsd_f64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32_t vrsqrtss_f32(float32_t __p0, float32_t __p1) {
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vrsqrtss_f32(__p0, __p1);
- return __ret;
- }
- #else
- __ai float32_t vrsqrtss_f32(float32_t __p0, float32_t __p1) {
- float32_t __ret;
- __ret = (float32_t) __builtin_neon_vrsqrtss_f32(__p0, __p1);
- return __ret;
- }
- #endif
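- // vrsqrte* give a reciprocal square-root estimate (FRSQRTE) and vrsqrts*
- // compute the step (3.0 - a*b) / 2.0 (FRSQRTS). A minimal refinement sketch
- // using the scalar f32 forms defined above (illustrative only; the function
- // name is hypothetical):
- __ai float32_t __example_rsqrt_f32(float32_t __d) {
-   float32_t __e = vrsqrtes_f32(__d);
-   __e = __e * vrsqrtss_f32(__d * __e, __e);
-   __e = __e * vrsqrtss_f32(__d * __e, __e);
-   return __e;
- }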
- #ifdef __LITTLE_ENDIAN__
- #define vrsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64_t __s0 = __p0; \
- uint64_t __s1 = __p1; \
- uint64_t __ret; \
- __ret = (uint64_t) __builtin_neon_vrsrad_n_u64(__s0, __s1, __p2); \
- __ret; \
- })
- #else
- #define vrsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64_t __s0 = __p0; \
- uint64_t __s1 = __p1; \
- uint64_t __ret; \
- __ret = (uint64_t) __builtin_neon_vrsrad_n_u64(__s0, __s1, __p2); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vrsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \
- int64_t __s0 = __p0; \
- int64_t __s1 = __p1; \
- int64_t __ret; \
- __ret = (int64_t) __builtin_neon_vrsrad_n_s64(__s0, __s1, __p2); \
- __ret; \
- })
- #else
- #define vrsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \
- int64_t __s0 = __p0; \
- int64_t __s1 = __p1; \
- int64_t __ret; \
- __ret = (int64_t) __builtin_neon_vrsrad_n_s64(__s0, __s1, __p2); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
- uint16x8_t __ret;
- __ret = vcombine_u16(__p0, vrsubhn_u32(__p1, __p2));
- return __ret;
- }
- #else
- __ai uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __noswap_vcombine_u16(__rev0, __noswap_vrsubhn_u32(__rev1, __rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
- uint32x4_t __ret;
- __ret = vcombine_u32(__p0, vrsubhn_u64(__p1, __p2));
- return __ret;
- }
- #else
- __ai uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
- uint32x4_t __ret;
- __ret = __noswap_vcombine_u32(__rev0, __noswap_vrsubhn_u64(__rev1, __rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
- uint8x16_t __ret;
- __ret = vcombine_u8(__p0, vrsubhn_u16(__p1, __p2));
- return __ret;
- }
- #else
- __ai uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = __noswap_vcombine_u8(__rev0, __noswap_vrsubhn_u16(__rev1, __rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
- int16x8_t __ret;
- __ret = vcombine_s16(__p0, vrsubhn_s32(__p1, __p2));
- return __ret;
- }
- #else
- __ai int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __noswap_vcombine_s16(__rev0, __noswap_vrsubhn_s32(__rev1, __rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
- int32x4_t __ret;
- __ret = vcombine_s32(__p0, vrsubhn_s64(__p1, __p2));
- return __ret;
- }
- #else
- __ai int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
- int32x4_t __ret;
- __ret = __noswap_vcombine_s32(__rev0, __noswap_vrsubhn_s64(__rev1, __rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
- int8x16_t __ret;
- __ret = vcombine_s8(__p0, vrsubhn_s16(__p1, __p2));
- return __ret;
- }
- #else
- __ai int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = __noswap_vcombine_s8(__rev0, __noswap_vrsubhn_s16(__rev1, __rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
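- // vrsrad_n_* add a rounded right shift of the second operand to the first
- // (64-bit scalar shift-right-and-accumulate). vrsubhn_high_<type> mirrors
- // vraddhn_high above but narrows the rounded high halves of (a - b).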
- #ifdef __LITTLE_ENDIAN__
- #define vset_lane_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64_t __s0 = __p0; \
- poly64x1_t __s1 = __p1; \
- poly64x1_t __ret; \
- __ret = (poly64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
- __ret; \
- })
- #else
- #define vset_lane_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64_t __s0 = __p0; \
- poly64x1_t __s1 = __p1; \
- poly64x1_t __ret; \
- __ret = (poly64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
- __ret; \
- })
- #define __noswap_vset_lane_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64_t __s0 = __p0; \
- poly64x1_t __s1 = __p1; \
- poly64x1_t __ret; \
- __ret = (poly64x1_t) __builtin_neon_vset_lane_i64(__s0, (int8x8_t)__s1, __p2); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64_t __s0 = __p0; \
- poly64x2_t __s1 = __p1; \
- poly64x2_t __ret; \
- __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
- __ret; \
- })
- #else
- #define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64_t __s0 = __p0; \
- poly64x2_t __s1 = __p1; \
- poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- poly64x2_t __ret; \
- __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__rev1, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #define __noswap_vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64_t __s0 = __p0; \
- poly64x2_t __s1 = __p1; \
- poly64x2_t __ret; \
- __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int8x16_t)__s1, __p2); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64_t __s0 = __p0; \
- float64x2_t __s1 = __p1; \
- float64x2_t __ret; \
- __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (int8x16_t)__s1, __p2); \
- __ret; \
- })
- #else
- #define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64_t __s0 = __p0; \
- float64x2_t __s1 = __p1; \
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- float64x2_t __ret; \
- __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (int8x16_t)__rev1, __p2); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #define __noswap_vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64_t __s0 = __p0; \
- float64x2_t __s1 = __p1; \
- float64x2_t __ret; \
- __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (int8x16_t)__s1, __p2); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vset_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64_t __s0 = __p0; \
- float64x1_t __s1 = __p1; \
- float64x1_t __ret; \
- __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (int8x8_t)__s1, __p2); \
- __ret; \
- })
- #else
- #define vset_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64_t __s0 = __p0; \
- float64x1_t __s1 = __p1; \
- float64x1_t __ret; \
- __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (int8x8_t)__s1, __p2); \
- __ret; \
- })
- #define __noswap_vset_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64_t __s0 = __p0; \
- float64x1_t __s1 = __p1; \
- float64x1_t __ret; \
- __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (int8x8_t)__s1, __p2); \
- __ret; \
- })
- #endif
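- // vset_lane_p64/f64 and vsetq_lane_p64/f64 replace lane __p2 of vector __p1
- // with the scalar __p0. The __noswap_ forms skip the big-endian lane reversal
- // and are used internally by other wrappers that have already reversed their
- // operands.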
- #ifdef __LITTLE_ENDIAN__
- __ai uint64_t vshld_u64(uint64_t __p0, uint64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vshld_u64(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint64_t vshld_u64(uint64_t __p0, uint64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vshld_u64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64_t vshld_s64(int64_t __p0, int64_t __p1) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vshld_s64(__p0, __p1);
- return __ret;
- }
- #else
- __ai int64_t vshld_s64(int64_t __p0, int64_t __p1) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vshld_s64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshld_n_u64(__p0, __p1) __extension__ ({ \
- uint64_t __s0 = __p0; \
- uint64_t __ret; \
- __ret = (uint64_t) __builtin_neon_vshld_n_u64(__s0, __p1); \
- __ret; \
- })
- #else
- #define vshld_n_u64(__p0, __p1) __extension__ ({ \
- uint64_t __s0 = __p0; \
- uint64_t __ret; \
- __ret = (uint64_t) __builtin_neon_vshld_n_u64(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshld_n_s64(__p0, __p1) __extension__ ({ \
- int64_t __s0 = __p0; \
- int64_t __ret; \
- __ret = (int64_t) __builtin_neon_vshld_n_s64(__s0, __p1); \
- __ret; \
- })
- #else
- #define vshld_n_s64(__p0, __p1) __extension__ ({ \
- int64_t __s0 = __p0; \
- int64_t __ret; \
- __ret = (int64_t) __builtin_neon_vshld_n_s64(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshll_high_n_u8(__p0_230, __p1_230) __extension__ ({ \
- uint8x16_t __s0_230 = __p0_230; \
- uint16x8_t __ret_230; \
- __ret_230 = (uint16x8_t)(vshll_n_u8(vget_high_u8(__s0_230), __p1_230)); \
- __ret_230; \
- })
- #else
- #define vshll_high_n_u8(__p0_231, __p1_231) __extension__ ({ \
- uint8x16_t __s0_231 = __p0_231; \
- uint8x16_t __rev0_231; __rev0_231 = __builtin_shufflevector(__s0_231, __s0_231, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __ret_231; \
- __ret_231 = (uint16x8_t)(__noswap_vshll_n_u8(__noswap_vget_high_u8(__rev0_231), __p1_231)); \
- __ret_231 = __builtin_shufflevector(__ret_231, __ret_231, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_231; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshll_high_n_u32(__p0_232, __p1_232) __extension__ ({ \
- uint32x4_t __s0_232 = __p0_232; \
- uint64x2_t __ret_232; \
- __ret_232 = (uint64x2_t)(vshll_n_u32(vget_high_u32(__s0_232), __p1_232)); \
- __ret_232; \
- })
- #else
- #define vshll_high_n_u32(__p0_233, __p1_233) __extension__ ({ \
- uint32x4_t __s0_233 = __p0_233; \
- uint32x4_t __rev0_233; __rev0_233 = __builtin_shufflevector(__s0_233, __s0_233, 3, 2, 1, 0); \
- uint64x2_t __ret_233; \
- __ret_233 = (uint64x2_t)(__noswap_vshll_n_u32(__noswap_vget_high_u32(__rev0_233), __p1_233)); \
- __ret_233 = __builtin_shufflevector(__ret_233, __ret_233, 1, 0); \
- __ret_233; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshll_high_n_u16(__p0_234, __p1_234) __extension__ ({ \
- uint16x8_t __s0_234 = __p0_234; \
- uint32x4_t __ret_234; \
- __ret_234 = (uint32x4_t)(vshll_n_u16(vget_high_u16(__s0_234), __p1_234)); \
- __ret_234; \
- })
- #else
- #define vshll_high_n_u16(__p0_235, __p1_235) __extension__ ({ \
- uint16x8_t __s0_235 = __p0_235; \
- uint16x8_t __rev0_235; __rev0_235 = __builtin_shufflevector(__s0_235, __s0_235, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint32x4_t __ret_235; \
- __ret_235 = (uint32x4_t)(__noswap_vshll_n_u16(__noswap_vget_high_u16(__rev0_235), __p1_235)); \
- __ret_235 = __builtin_shufflevector(__ret_235, __ret_235, 3, 2, 1, 0); \
- __ret_235; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshll_high_n_s8(__p0_236, __p1_236) __extension__ ({ \
- int8x16_t __s0_236 = __p0_236; \
- int16x8_t __ret_236; \
- __ret_236 = (int16x8_t)(vshll_n_s8(vget_high_s8(__s0_236), __p1_236)); \
- __ret_236; \
- })
- #else
- #define vshll_high_n_s8(__p0_237, __p1_237) __extension__ ({ \
- int8x16_t __s0_237 = __p0_237; \
- int8x16_t __rev0_237; __rev0_237 = __builtin_shufflevector(__s0_237, __s0_237, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __ret_237; \
- __ret_237 = (int16x8_t)(__noswap_vshll_n_s8(__noswap_vget_high_s8(__rev0_237), __p1_237)); \
- __ret_237 = __builtin_shufflevector(__ret_237, __ret_237, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_237; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshll_high_n_s32(__p0_238, __p1_238) __extension__ ({ \
- int32x4_t __s0_238 = __p0_238; \
- int64x2_t __ret_238; \
- __ret_238 = (int64x2_t)(vshll_n_s32(vget_high_s32(__s0_238), __p1_238)); \
- __ret_238; \
- })
- #else
- #define vshll_high_n_s32(__p0_239, __p1_239) __extension__ ({ \
- int32x4_t __s0_239 = __p0_239; \
- int32x4_t __rev0_239; __rev0_239 = __builtin_shufflevector(__s0_239, __s0_239, 3, 2, 1, 0); \
- int64x2_t __ret_239; \
- __ret_239 = (int64x2_t)(__noswap_vshll_n_s32(__noswap_vget_high_s32(__rev0_239), __p1_239)); \
- __ret_239 = __builtin_shufflevector(__ret_239, __ret_239, 1, 0); \
- __ret_239; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshll_high_n_s16(__p0_240, __p1_240) __extension__ ({ \
- int16x8_t __s0_240 = __p0_240; \
- int32x4_t __ret_240; \
- __ret_240 = (int32x4_t)(vshll_n_s16(vget_high_s16(__s0_240), __p1_240)); \
- __ret_240; \
- })
- #else
- #define vshll_high_n_s16(__p0_241, __p1_241) __extension__ ({ \
- int16x8_t __s0_241 = __p0_241; \
- int16x8_t __rev0_241; __rev0_241 = __builtin_shufflevector(__s0_241, __s0_241, 7, 6, 5, 4, 3, 2, 1, 0); \
- int32x4_t __ret_241; \
- __ret_241 = (int32x4_t)(__noswap_vshll_n_s16(__noswap_vget_high_s16(__rev0_241), __p1_241)); \
- __ret_241 = __builtin_shufflevector(__ret_241, __ret_241, 3, 2, 1, 0); \
- __ret_241; \
- })
- #endif
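- // vshld_*/vshld_n_* are the 64-bit scalar left shifts (by register and by
- // immediate). vshll_high_n_<type>(a, n) widens the upper half of a to twice
- // the element width and shifts each element left by n. A minimal sketch
- // (illustrative only; the function name is hypothetical):
- __ai uint16x8_t __example_widen_high(uint8x16_t __v) {
-   return vshll_high_n_u8(__v, 4); // top 8 bytes of __v, zero-extended and shifted left by 4
- }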
- #ifdef __LITTLE_ENDIAN__
- #define vshrd_n_u64(__p0, __p1) __extension__ ({ \
- uint64_t __s0 = __p0; \
- uint64_t __ret; \
- __ret = (uint64_t) __builtin_neon_vshrd_n_u64(__s0, __p1); \
- __ret; \
- })
- #else
- #define vshrd_n_u64(__p0, __p1) __extension__ ({ \
- uint64_t __s0 = __p0; \
- uint64_t __ret; \
- __ret = (uint64_t) __builtin_neon_vshrd_n_u64(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshrd_n_s64(__p0, __p1) __extension__ ({ \
- int64_t __s0 = __p0; \
- int64_t __ret; \
- __ret = (int64_t) __builtin_neon_vshrd_n_s64(__s0, __p1); \
- __ret; \
- })
- #else
- #define vshrd_n_s64(__p0, __p1) __extension__ ({ \
- int64_t __s0 = __p0; \
- int64_t __ret; \
- __ret = (int64_t) __builtin_neon_vshrd_n_s64(__s0, __p1); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshrn_high_n_u32(__p0_242, __p1_242, __p2_242) __extension__ ({ \
- uint16x4_t __s0_242 = __p0_242; \
- uint32x4_t __s1_242 = __p1_242; \
- uint16x8_t __ret_242; \
- __ret_242 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_242), (uint16x4_t)(vshrn_n_u32(__s1_242, __p2_242)))); \
- __ret_242; \
- })
- #else
- #define vshrn_high_n_u32(__p0_243, __p1_243, __p2_243) __extension__ ({ \
- uint16x4_t __s0_243 = __p0_243; \
- uint32x4_t __s1_243 = __p1_243; \
- uint16x4_t __rev0_243; __rev0_243 = __builtin_shufflevector(__s0_243, __s0_243, 3, 2, 1, 0); \
- uint32x4_t __rev1_243; __rev1_243 = __builtin_shufflevector(__s1_243, __s1_243, 3, 2, 1, 0); \
- uint16x8_t __ret_243; \
- __ret_243 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_243), (uint16x4_t)(__noswap_vshrn_n_u32(__rev1_243, __p2_243)))); \
- __ret_243 = __builtin_shufflevector(__ret_243, __ret_243, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_243; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshrn_high_n_u64(__p0_244, __p1_244, __p2_244) __extension__ ({ \
- uint32x2_t __s0_244 = __p0_244; \
- uint64x2_t __s1_244 = __p1_244; \
- uint32x4_t __ret_244; \
- __ret_244 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_244), (uint32x2_t)(vshrn_n_u64(__s1_244, __p2_244)))); \
- __ret_244; \
- })
- #else
- #define vshrn_high_n_u64(__p0_245, __p1_245, __p2_245) __extension__ ({ \
- uint32x2_t __s0_245 = __p0_245; \
- uint64x2_t __s1_245 = __p1_245; \
- uint32x2_t __rev0_245; __rev0_245 = __builtin_shufflevector(__s0_245, __s0_245, 1, 0); \
- uint64x2_t __rev1_245; __rev1_245 = __builtin_shufflevector(__s1_245, __s1_245, 1, 0); \
- uint32x4_t __ret_245; \
- __ret_245 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_245), (uint32x2_t)(__noswap_vshrn_n_u64(__rev1_245, __p2_245)))); \
- __ret_245 = __builtin_shufflevector(__ret_245, __ret_245, 3, 2, 1, 0); \
- __ret_245; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshrn_high_n_u16(__p0_246, __p1_246, __p2_246) __extension__ ({ \
- uint8x8_t __s0_246 = __p0_246; \
- uint16x8_t __s1_246 = __p1_246; \
- uint8x16_t __ret_246; \
- __ret_246 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_246), (uint8x8_t)(vshrn_n_u16(__s1_246, __p2_246)))); \
- __ret_246; \
- })
- #else
- #define vshrn_high_n_u16(__p0_247, __p1_247, __p2_247) __extension__ ({ \
- uint8x8_t __s0_247 = __p0_247; \
- uint16x8_t __s1_247 = __p1_247; \
- uint8x8_t __rev0_247; __rev0_247 = __builtin_shufflevector(__s0_247, __s0_247, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint16x8_t __rev1_247; __rev1_247 = __builtin_shufflevector(__s1_247, __s1_247, 7, 6, 5, 4, 3, 2, 1, 0); \
- uint8x16_t __ret_247; \
- __ret_247 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_247), (uint8x8_t)(__noswap_vshrn_n_u16(__rev1_247, __p2_247)))); \
- __ret_247 = __builtin_shufflevector(__ret_247, __ret_247, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_247; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshrn_high_n_s32(__p0_248, __p1_248, __p2_248) __extension__ ({ \
- int16x4_t __s0_248 = __p0_248; \
- int32x4_t __s1_248 = __p1_248; \
- int16x8_t __ret_248; \
- __ret_248 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_248), (int16x4_t)(vshrn_n_s32(__s1_248, __p2_248)))); \
- __ret_248; \
- })
- #else
- #define vshrn_high_n_s32(__p0_249, __p1_249, __p2_249) __extension__ ({ \
- int16x4_t __s0_249 = __p0_249; \
- int32x4_t __s1_249 = __p1_249; \
- int16x4_t __rev0_249; __rev0_249 = __builtin_shufflevector(__s0_249, __s0_249, 3, 2, 1, 0); \
- int32x4_t __rev1_249; __rev1_249 = __builtin_shufflevector(__s1_249, __s1_249, 3, 2, 1, 0); \
- int16x8_t __ret_249; \
- __ret_249 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_249), (int16x4_t)(__noswap_vshrn_n_s32(__rev1_249, __p2_249)))); \
- __ret_249 = __builtin_shufflevector(__ret_249, __ret_249, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_249; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshrn_high_n_s64(__p0_250, __p1_250, __p2_250) __extension__ ({ \
- int32x2_t __s0_250 = __p0_250; \
- int64x2_t __s1_250 = __p1_250; \
- int32x4_t __ret_250; \
- __ret_250 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_250), (int32x2_t)(vshrn_n_s64(__s1_250, __p2_250)))); \
- __ret_250; \
- })
- #else
- #define vshrn_high_n_s64(__p0_251, __p1_251, __p2_251) __extension__ ({ \
- int32x2_t __s0_251 = __p0_251; \
- int64x2_t __s1_251 = __p1_251; \
- int32x2_t __rev0_251; __rev0_251 = __builtin_shufflevector(__s0_251, __s0_251, 1, 0); \
- int64x2_t __rev1_251; __rev1_251 = __builtin_shufflevector(__s1_251, __s1_251, 1, 0); \
- int32x4_t __ret_251; \
- __ret_251 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_251), (int32x2_t)(__noswap_vshrn_n_s64(__rev1_251, __p2_251)))); \
- __ret_251 = __builtin_shufflevector(__ret_251, __ret_251, 3, 2, 1, 0); \
- __ret_251; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vshrn_high_n_s16(__p0_252, __p1_252, __p2_252) __extension__ ({ \
- int8x8_t __s0_252 = __p0_252; \
- int16x8_t __s1_252 = __p1_252; \
- int8x16_t __ret_252; \
- __ret_252 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_252), (int8x8_t)(vshrn_n_s16(__s1_252, __p2_252)))); \
- __ret_252; \
- })
- #else
- #define vshrn_high_n_s16(__p0_253, __p1_253, __p2_253) __extension__ ({ \
- int8x8_t __s0_253 = __p0_253; \
- int16x8_t __s1_253 = __p1_253; \
- int8x8_t __rev0_253; __rev0_253 = __builtin_shufflevector(__s0_253, __s0_253, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16x8_t __rev1_253; __rev1_253 = __builtin_shufflevector(__s1_253, __s1_253, 7, 6, 5, 4, 3, 2, 1, 0); \
- int8x16_t __ret_253; \
- __ret_253 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_253), (int8x8_t)(__noswap_vshrn_n_s16(__rev1_253, __p2_253)))); \
- __ret_253 = __builtin_shufflevector(__ret_253, __ret_253, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_253; \
- })
- #endif
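- // vshrd_n_* and vshrn_high_n_<type> are the truncating counterparts of the
- // rounding forms above: shift right by an immediate and, for the _high_n
- // forms, narrow the result into the upper half of the destination.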
- #ifdef __LITTLE_ENDIAN__
- #define vslid_n_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64_t __s0 = __p0; \
- uint64_t __s1 = __p1; \
- uint64_t __ret; \
- __ret = (uint64_t) __builtin_neon_vslid_n_u64(__s0, __s1, __p2); \
- __ret; \
- })
- #else
- #define vslid_n_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64_t __s0 = __p0; \
- uint64_t __s1 = __p1; \
- uint64_t __ret; \
- __ret = (uint64_t) __builtin_neon_vslid_n_u64(__s0, __s1, __p2); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vslid_n_s64(__p0, __p1, __p2) __extension__ ({ \
- int64_t __s0 = __p0; \
- int64_t __s1 = __p1; \
- int64_t __ret; \
- __ret = (int64_t) __builtin_neon_vslid_n_s64(__s0, __s1, __p2); \
- __ret; \
- })
- #else
- #define vslid_n_s64(__p0, __p1, __p2) __extension__ ({ \
- int64_t __s0 = __p0; \
- int64_t __s1 = __p1; \
- int64_t __ret; \
- __ret = (int64_t) __builtin_neon_vslid_n_s64(__s0, __s1, __p2); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsli_n_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x1_t __s0 = __p0; \
- poly64x1_t __s1 = __p1; \
- poly64x1_t __ret; \
- __ret = (poly64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
- __ret; \
- })
- #else
- #define vsli_n_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x1_t __s0 = __p0; \
- poly64x1_t __s1 = __p1; \
- poly64x1_t __ret; \
- __ret = (poly64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x2_t __s0 = __p0; \
- poly64x2_t __s1 = __p1; \
- poly64x2_t __ret; \
- __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \
- __ret; \
- })
- #else
- #define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x2_t __s0 = __p0; \
- poly64x2_t __s1 = __p1; \
- poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- poly64x2_t __ret; \
- __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
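- // vslid_n_*/vsli_n_p64/vsliq_n_p64 are shift-left-and-insert (SLI): the second
- // operand is shifted left by the immediate and written into the first operand,
- // whose bits below the shift amount are preserved.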
- #ifdef __LITTLE_ENDIAN__
- __ai uint8_t vsqaddb_u8(uint8_t __p0, uint8_t __p1) {
- uint8_t __ret;
- __ret = (uint8_t) __builtin_neon_vsqaddb_u8(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint8_t vsqaddb_u8(uint8_t __p0, uint8_t __p1) {
- uint8_t __ret;
- __ret = (uint8_t) __builtin_neon_vsqaddb_u8(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32_t vsqadds_u32(uint32_t __p0, uint32_t __p1) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vsqadds_u32(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint32_t vsqadds_u32(uint32_t __p0, uint32_t __p1) {
- uint32_t __ret;
- __ret = (uint32_t) __builtin_neon_vsqadds_u32(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64_t vsqaddd_u64(uint64_t __p0, uint64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vsqaddd_u64(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint64_t vsqaddd_u64(uint64_t __p0, uint64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vsqaddd_u64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16_t vsqaddh_u16(uint16_t __p0, uint16_t __p1) {
- uint16_t __ret;
- __ret = (uint16_t) __builtin_neon_vsqaddh_u16(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint16_t vsqaddh_u16(uint16_t __p0, uint16_t __p1) {
- uint16_t __ret;
- __ret = (uint16_t) __builtin_neon_vsqaddh_u16(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vsqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
- return __ret;
- }
- #else
- __ai uint8x16_t vsqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = (uint8x16_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vsqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
- return __ret;
- }
- #else
- __ai uint32x4_t vsqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vsqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
- return __ret;
- }
- #else
- __ai uint64x2_t vsqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vsqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
- return __ret;
- }
- #else
- __ai uint16x8_t vsqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vsqadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
- return __ret;
- }
- #else
- __ai uint8x8_t vsqadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = (uint8x8_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vsqadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
- return __ret;
- }
- #else
- __ai uint32x2_t vsqadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = (uint32x2_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vsqadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
- return __ret;
- }
- #else
- __ai uint64x1_t vsqadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vsqadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
- return __ret;
- }
- #else
- __ai uint16x4_t vsqadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = (uint16x4_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
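- // The vsqadd* family is the saturating-accumulate (USQADD) group: the second
- // operand is added to the first with unsigned saturation; see the ACLE for the
- // exact operand signedness of each form.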
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vsqrtq_f64(float64x2_t __p0) {
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 42);
- return __ret;
- }
- #else
- __ai float64x2_t vsqrtq_f64(float64x2_t __p0) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __ret;
- __ret = (float64x2_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 42);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vsqrtq_f32(float32x4_t __p0) {
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 41);
- return __ret;
- }
- #else
- __ai float32x4_t vsqrtq_f32(float32x4_t __p0) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = (float32x4_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 41);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vsqrt_f64(float64x1_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 10);
- return __ret;
- }
- #else
- __ai float64x1_t vsqrt_f64(float64x1_t __p0) {
- float64x1_t __ret;
- __ret = (float64x1_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 10);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vsqrt_f32(float32x2_t __p0) {
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 9);
- return __ret;
- }
- #else
- __ai float32x2_t vsqrt_f32(float32x2_t __p0) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __ret;
- __ret = (float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__rev0, 9);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
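- // vsqrt_f32/f64 and vsqrtq_f32/f64 compute the element-wise floating-point
- // square root (FSQRT); these intrinsics are AArch64-only.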
- #ifdef __LITTLE_ENDIAN__
- #define vsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64_t __s0 = __p0; \
- uint64_t __s1 = __p1; \
- uint64_t __ret; \
- __ret = (uint64_t) __builtin_neon_vsrad_n_u64(__s0, __s1, __p2); \
- __ret; \
- })
- #else
- #define vsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64_t __s0 = __p0; \
- uint64_t __s1 = __p1; \
- uint64_t __ret; \
- __ret = (uint64_t) __builtin_neon_vsrad_n_u64(__s0, __s1, __p2); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \
- int64_t __s0 = __p0; \
- int64_t __s1 = __p1; \
- int64_t __ret; \
- __ret = (int64_t) __builtin_neon_vsrad_n_s64(__s0, __s1, __p2); \
- __ret; \
- })
- #else
- #define vsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \
- int64_t __s0 = __p0; \
- int64_t __s1 = __p1; \
- int64_t __ret; \
- __ret = (int64_t) __builtin_neon_vsrad_n_s64(__s0, __s1, __p2); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsrid_n_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64_t __s0 = __p0; \
- uint64_t __s1 = __p1; \
- uint64_t __ret; \
- __ret = (uint64_t) __builtin_neon_vsrid_n_u64(__s0, __s1, __p2); \
- __ret; \
- })
- #else
- #define vsrid_n_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64_t __s0 = __p0; \
- uint64_t __s1 = __p1; \
- uint64_t __ret; \
- __ret = (uint64_t) __builtin_neon_vsrid_n_u64(__s0, __s1, __p2); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsrid_n_s64(__p0, __p1, __p2) __extension__ ({ \
- int64_t __s0 = __p0; \
- int64_t __s1 = __p1; \
- int64_t __ret; \
- __ret = (int64_t) __builtin_neon_vsrid_n_s64(__s0, __s1, __p2); \
- __ret; \
- })
- #else
- #define vsrid_n_s64(__p0, __p1, __p2) __extension__ ({ \
- int64_t __s0 = __p0; \
- int64_t __s1 = __p1; \
- int64_t __ret; \
- __ret = (int64_t) __builtin_neon_vsrid_n_s64(__s0, __s1, __p2); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsri_n_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x1_t __s0 = __p0; \
- poly64x1_t __s1 = __p1; \
- poly64x1_t __ret; \
- __ret = (poly64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
- __ret; \
- })
- #else
- #define vsri_n_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x1_t __s0 = __p0; \
- poly64x1_t __s1 = __p1; \
- poly64x1_t __ret; \
- __ret = (poly64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x2_t __s0 = __p0; \
- poly64x2_t __s1 = __p1; \
- poly64x2_t __ret; \
- __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \
- __ret; \
- })
- #else
- #define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x2_t __s0 = __p0; \
- poly64x2_t __s1 = __p1; \
- poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- poly64x2_t __ret; \
- __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
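- // vsrad_n_* accumulate a truncating right shift of the second operand into the
- // first; vsrid_n_*/vsri_n_p64/vsriq_n_p64 are shift-right-and-insert (SRI),
- // which preserve the most significant n bits of the first operand.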
- #ifdef __LITTLE_ENDIAN__
- #define vst1_p64(__p0, __p1) __extension__ ({ \
- poly64x1_t __s1 = __p1; \
- __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 6); \
- })
- #else
- #define vst1_p64(__p0, __p1) __extension__ ({ \
- poly64x1_t __s1 = __p1; \
- __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 6); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_p64(__p0, __p1) __extension__ ({ \
- poly64x2_t __s1 = __p1; \
- __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 38); \
- })
- #else
- #define vst1q_p64(__p0, __p1) __extension__ ({ \
- poly64x2_t __s1 = __p1; \
- poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 38); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_f64(__p0, __p1) __extension__ ({ \
- float64x2_t __s1 = __p1; \
- __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 42); \
- })
- #else
- #define vst1q_f64(__p0, __p1) __extension__ ({ \
- float64x2_t __s1 = __p1; \
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 42); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_f64(__p0, __p1) __extension__ ({ \
- float64x1_t __s1 = __p1; \
- __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 10); \
- })
- #else
- #define vst1_f64(__p0, __p1) __extension__ ({ \
- float64x1_t __s1 = __p1; \
- __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 10); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x1_t __s1 = __p1; \
- __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \
- })
- #else
- #define vst1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x1_t __s1 = __p1; \
- __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x2_t __s1 = __p1; \
- __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \
- })
- #else
- #define vst1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x2_t __s1 = __p1; \
- poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x2_t __s1 = __p1; \
- __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \
- })
- #else
- #define vst1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x2_t __s1 = __p1; \
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x1_t __s1 = __p1; \
- __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \
- })
- #else
- #define vst1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x1_t __s1 = __p1; \
- __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \
- })
- #endif
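- // vst1/vst1q store a whole vector to memory and vst1_lane/vst1q_lane store a
- // single lane; the _x2 forms below store two vectors to consecutive memory
- // without interleaving.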
- #ifdef __LITTLE_ENDIAN__
- #define vst1_p8_x2(__p0, __p1) __extension__ ({ \
- poly8x8x2_t __s1 = __p1; \
- __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 4); \
- })
- #else
- #define vst1_p8_x2(__p0, __p1) __extension__ ({ \
- poly8x8x2_t __s1 = __p1; \
- poly8x8x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 4); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_p64_x2(__p0, __p1) __extension__ ({ \
- poly64x1x2_t __s1 = __p1; \
- __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \
- })
- #else
- #define vst1_p64_x2(__p0, __p1) __extension__ ({ \
- poly64x1x2_t __s1 = __p1; \
- __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_p16_x2(__p0, __p1) __extension__ ({ \
- poly16x4x2_t __s1 = __p1; \
- __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 5); \
- })
- #else
- #define vst1_p16_x2(__p0, __p1) __extension__ ({ \
- poly16x4x2_t __s1 = __p1; \
- poly16x4x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 5); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_p8_x2(__p0, __p1) __extension__ ({ \
- poly8x16x2_t __s1 = __p1; \
- __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 36); \
- })
- #else
- #define vst1q_p8_x2(__p0, __p1) __extension__ ({ \
- poly8x16x2_t __s1 = __p1; \
- poly8x16x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 36); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_p64_x2(__p0, __p1) __extension__ ({ \
- poly64x2x2_t __s1 = __p1; \
- __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 38); \
- })
- #else
- #define vst1q_p64_x2(__p0, __p1) __extension__ ({ \
- poly64x2x2_t __s1 = __p1; \
- poly64x2x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 38); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_p16_x2(__p0, __p1) __extension__ ({ \
- poly16x8x2_t __s1 = __p1; \
- __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 37); \
- })
- #else
- #define vst1q_p16_x2(__p0, __p1) __extension__ ({ \
- poly16x8x2_t __s1 = __p1; \
- poly16x8x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 37); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_u8_x2(__p0, __p1) __extension__ ({ \
- uint8x16x2_t __s1 = __p1; \
- __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 48); \
- })
- #else
- #define vst1q_u8_x2(__p0, __p1) __extension__ ({ \
- uint8x16x2_t __s1 = __p1; \
- uint8x16x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 48); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_u32_x2(__p0, __p1) __extension__ ({ \
- uint32x4x2_t __s1 = __p1; \
- __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 50); \
- })
- #else
- #define vst1q_u32_x2(__p0, __p1) __extension__ ({ \
- uint32x4x2_t __s1 = __p1; \
- uint32x4x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 50); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_u64_x2(__p0, __p1) __extension__ ({ \
- uint64x2x2_t __s1 = __p1; \
- __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 51); \
- })
- #else
- #define vst1q_u64_x2(__p0, __p1) __extension__ ({ \
- uint64x2x2_t __s1 = __p1; \
- uint64x2x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 51); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_u16_x2(__p0, __p1) __extension__ ({ \
- uint16x8x2_t __s1 = __p1; \
- __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 49); \
- })
- #else
- #define vst1q_u16_x2(__p0, __p1) __extension__ ({ \
- uint16x8x2_t __s1 = __p1; \
- uint16x8x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 49); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_s8_x2(__p0, __p1) __extension__ ({ \
- int8x16x2_t __s1 = __p1; \
- __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 32); \
- })
- #else
- #define vst1q_s8_x2(__p0, __p1) __extension__ ({ \
- int8x16x2_t __s1 = __p1; \
- int8x16x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 32); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_f64_x2(__p0, __p1) __extension__ ({ \
- float64x2x2_t __s1 = __p1; \
- __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 42); \
- })
- #else
- #define vst1q_f64_x2(__p0, __p1) __extension__ ({ \
- float64x2x2_t __s1 = __p1; \
- float64x2x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 42); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_f32_x2(__p0, __p1) __extension__ ({ \
- float32x4x2_t __s1 = __p1; \
- __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 41); \
- })
- #else
- #define vst1q_f32_x2(__p0, __p1) __extension__ ({ \
- float32x4x2_t __s1 = __p1; \
- float32x4x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 41); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_f16_x2(__p0, __p1) __extension__ ({ \
- float16x8x2_t __s1 = __p1; \
- __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 40); \
- })
- #else
- #define vst1q_f16_x2(__p0, __p1) __extension__ ({ \
- float16x8x2_t __s1 = __p1; \
- float16x8x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 40); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_s32_x2(__p0, __p1) __extension__ ({ \
- int32x4x2_t __s1 = __p1; \
- __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 34); \
- })
- #else
- #define vst1q_s32_x2(__p0, __p1) __extension__ ({ \
- int32x4x2_t __s1 = __p1; \
- int32x4x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 34); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_s64_x2(__p0, __p1) __extension__ ({ \
- int64x2x2_t __s1 = __p1; \
- __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 35); \
- })
- #else
- #define vst1q_s64_x2(__p0, __p1) __extension__ ({ \
- int64x2x2_t __s1 = __p1; \
- int64x2x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 35); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_s16_x2(__p0, __p1) __extension__ ({ \
- int16x8x2_t __s1 = __p1; \
- __builtin_neon_vst1q_x2_v(__p0, __s1.val[0], __s1.val[1], 33); \
- })
- #else
- #define vst1q_s16_x2(__p0, __p1) __extension__ ({ \
- int16x8x2_t __s1 = __p1; \
- int16x8x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1q_x2_v(__p0, __rev1.val[0], __rev1.val[1], 33); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_u8_x2(__p0, __p1) __extension__ ({ \
- uint8x8x2_t __s1 = __p1; \
- __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 16); \
- })
- #else
- #define vst1_u8_x2(__p0, __p1) __extension__ ({ \
- uint8x8x2_t __s1 = __p1; \
- uint8x8x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 16); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_u32_x2(__p0, __p1) __extension__ ({ \
- uint32x2x2_t __s1 = __p1; \
- __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 18); \
- })
- #else
- #define vst1_u32_x2(__p0, __p1) __extension__ ({ \
- uint32x2x2_t __s1 = __p1; \
- uint32x2x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 18); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_u64_x2(__p0, __p1) __extension__ ({ \
- uint64x1x2_t __s1 = __p1; \
- __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \
- })
- #else
- #define vst1_u64_x2(__p0, __p1) __extension__ ({ \
- uint64x1x2_t __s1 = __p1; \
- __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_u16_x2(__p0, __p1) __extension__ ({ \
- uint16x4x2_t __s1 = __p1; \
- __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 17); \
- })
- #else
- #define vst1_u16_x2(__p0, __p1) __extension__ ({ \
- uint16x4x2_t __s1 = __p1; \
- uint16x4x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 17); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_s8_x2(__p0, __p1) __extension__ ({ \
- int8x8x2_t __s1 = __p1; \
- __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 0); \
- })
- #else
- #define vst1_s8_x2(__p0, __p1) __extension__ ({ \
- int8x8x2_t __s1 = __p1; \
- int8x8x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 0); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_f64_x2(__p0, __p1) __extension__ ({ \
- float64x1x2_t __s1 = __p1; \
- __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 10); \
- })
- #else
- #define vst1_f64_x2(__p0, __p1) __extension__ ({ \
- float64x1x2_t __s1 = __p1; \
- __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 10); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_f32_x2(__p0, __p1) __extension__ ({ \
- float32x2x2_t __s1 = __p1; \
- __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 9); \
- })
- #else
- #define vst1_f32_x2(__p0, __p1) __extension__ ({ \
- float32x2x2_t __s1 = __p1; \
- float32x2x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 9); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_f16_x2(__p0, __p1) __extension__ ({ \
- float16x4x2_t __s1 = __p1; \
- __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 8); \
- })
- #else
- #define vst1_f16_x2(__p0, __p1) __extension__ ({ \
- float16x4x2_t __s1 = __p1; \
- float16x4x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 8); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_s32_x2(__p0, __p1) __extension__ ({ \
- int32x2x2_t __s1 = __p1; \
- __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 2); \
- })
- #else
- #define vst1_s32_x2(__p0, __p1) __extension__ ({ \
- int32x2x2_t __s1 = __p1; \
- int32x2x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 2); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_s64_x2(__p0, __p1) __extension__ ({ \
- int64x1x2_t __s1 = __p1; \
- __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 3); \
- })
- #else
- #define vst1_s64_x2(__p0, __p1) __extension__ ({ \
- int64x1x2_t __s1 = __p1; \
- __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 3); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_s16_x2(__p0, __p1) __extension__ ({ \
- int16x4x2_t __s1 = __p1; \
- __builtin_neon_vst1_x2_v(__p0, __s1.val[0], __s1.val[1], 1); \
- })
- #else
- #define vst1_s16_x2(__p0, __p1) __extension__ ({ \
- int16x4x2_t __s1 = __p1; \
- int16x4x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __builtin_neon_vst1_x2_v(__p0, __rev1.val[0], __rev1.val[1], 1); \
- })
- #endif
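
The _x2 macros above store two whole registers to consecutive, non-interleaved memory. A minimal usage sketch, again assuming an AArch64 target; the function and variable names are illustrative only:

#include <arm_neon.h>
#include <stdint.h>

/* vst1q_u8_x2 writes val[0] followed by val[1], 32 bytes in total. */
void store_two_q_regs(uint8_t *dst, uint8x16_t a, uint8x16_t b) {
    uint8x16x2_t pair = { { a, b } };
    vst1q_u8_x2(dst, pair);
}
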
- #ifdef __LITTLE_ENDIAN__
- #define vst1_p8_x3(__p0, __p1) __extension__ ({ \
- poly8x8x3_t __s1 = __p1; \
- __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 4); \
- })
- #else
- #define vst1_p8_x3(__p0, __p1) __extension__ ({ \
- poly8x8x3_t __s1 = __p1; \
- poly8x8x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 4); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_p64_x3(__p0, __p1) __extension__ ({ \
- poly64x1x3_t __s1 = __p1; \
- __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \
- })
- #else
- #define vst1_p64_x3(__p0, __p1) __extension__ ({ \
- poly64x1x3_t __s1 = __p1; \
- __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_p16_x3(__p0, __p1) __extension__ ({ \
- poly16x4x3_t __s1 = __p1; \
- __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 5); \
- })
- #else
- #define vst1_p16_x3(__p0, __p1) __extension__ ({ \
- poly16x4x3_t __s1 = __p1; \
- poly16x4x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 5); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_p8_x3(__p0, __p1) __extension__ ({ \
- poly8x16x3_t __s1 = __p1; \
- __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 36); \
- })
- #else
- #define vst1q_p8_x3(__p0, __p1) __extension__ ({ \
- poly8x16x3_t __s1 = __p1; \
- poly8x16x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 36); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_p64_x3(__p0, __p1) __extension__ ({ \
- poly64x2x3_t __s1 = __p1; \
- __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 38); \
- })
- #else
- #define vst1q_p64_x3(__p0, __p1) __extension__ ({ \
- poly64x2x3_t __s1 = __p1; \
- poly64x2x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 38); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_p16_x3(__p0, __p1) __extension__ ({ \
- poly16x8x3_t __s1 = __p1; \
- __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 37); \
- })
- #else
- #define vst1q_p16_x3(__p0, __p1) __extension__ ({ \
- poly16x8x3_t __s1 = __p1; \
- poly16x8x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 37); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_u8_x3(__p0, __p1) __extension__ ({ \
- uint8x16x3_t __s1 = __p1; \
- __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 48); \
- })
- #else
- #define vst1q_u8_x3(__p0, __p1) __extension__ ({ \
- uint8x16x3_t __s1 = __p1; \
- uint8x16x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 48); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_u32_x3(__p0, __p1) __extension__ ({ \
- uint32x4x3_t __s1 = __p1; \
- __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 50); \
- })
- #else
- #define vst1q_u32_x3(__p0, __p1) __extension__ ({ \
- uint32x4x3_t __s1 = __p1; \
- uint32x4x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 50); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_u64_x3(__p0, __p1) __extension__ ({ \
- uint64x2x3_t __s1 = __p1; \
- __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 51); \
- })
- #else
- #define vst1q_u64_x3(__p0, __p1) __extension__ ({ \
- uint64x2x3_t __s1 = __p1; \
- uint64x2x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 51); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_u16_x3(__p0, __p1) __extension__ ({ \
- uint16x8x3_t __s1 = __p1; \
- __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 49); \
- })
- #else
- #define vst1q_u16_x3(__p0, __p1) __extension__ ({ \
- uint16x8x3_t __s1 = __p1; \
- uint16x8x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 49); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_s8_x3(__p0, __p1) __extension__ ({ \
- int8x16x3_t __s1 = __p1; \
- __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 32); \
- })
- #else
- #define vst1q_s8_x3(__p0, __p1) __extension__ ({ \
- int8x16x3_t __s1 = __p1; \
- int8x16x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 32); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_f64_x3(__p0, __p1) __extension__ ({ \
- float64x2x3_t __s1 = __p1; \
- __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 42); \
- })
- #else
- #define vst1q_f64_x3(__p0, __p1) __extension__ ({ \
- float64x2x3_t __s1 = __p1; \
- float64x2x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 42); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_f32_x3(__p0, __p1) __extension__ ({ \
- float32x4x3_t __s1 = __p1; \
- __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 41); \
- })
- #else
- #define vst1q_f32_x3(__p0, __p1) __extension__ ({ \
- float32x4x3_t __s1 = __p1; \
- float32x4x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 41); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_f16_x3(__p0, __p1) __extension__ ({ \
- float16x8x3_t __s1 = __p1; \
- __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 40); \
- })
- #else
- #define vst1q_f16_x3(__p0, __p1) __extension__ ({ \
- float16x8x3_t __s1 = __p1; \
- float16x8x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 40); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_s32_x3(__p0, __p1) __extension__ ({ \
- int32x4x3_t __s1 = __p1; \
- __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 34); \
- })
- #else
- #define vst1q_s32_x3(__p0, __p1) __extension__ ({ \
- int32x4x3_t __s1 = __p1; \
- int32x4x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 34); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_s64_x3(__p0, __p1) __extension__ ({ \
- int64x2x3_t __s1 = __p1; \
- __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 35); \
- })
- #else
- #define vst1q_s64_x3(__p0, __p1) __extension__ ({ \
- int64x2x3_t __s1 = __p1; \
- int64x2x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 35); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_s16_x3(__p0, __p1) __extension__ ({ \
- int16x8x3_t __s1 = __p1; \
- __builtin_neon_vst1q_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 33); \
- })
- #else
- #define vst1q_s16_x3(__p0, __p1) __extension__ ({ \
- int16x8x3_t __s1 = __p1; \
- int16x8x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1q_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 33); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_u8_x3(__p0, __p1) __extension__ ({ \
- uint8x8x3_t __s1 = __p1; \
- __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 16); \
- })
- #else
- #define vst1_u8_x3(__p0, __p1) __extension__ ({ \
- uint8x8x3_t __s1 = __p1; \
- uint8x8x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 16); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_u32_x3(__p0, __p1) __extension__ ({ \
- uint32x2x3_t __s1 = __p1; \
- __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 18); \
- })
- #else
- #define vst1_u32_x3(__p0, __p1) __extension__ ({ \
- uint32x2x3_t __s1 = __p1; \
- uint32x2x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 18); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_u64_x3(__p0, __p1) __extension__ ({ \
- uint64x1x3_t __s1 = __p1; \
- __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \
- })
- #else
- #define vst1_u64_x3(__p0, __p1) __extension__ ({ \
- uint64x1x3_t __s1 = __p1; \
- __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_u16_x3(__p0, __p1) __extension__ ({ \
- uint16x4x3_t __s1 = __p1; \
- __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 17); \
- })
- #else
- #define vst1_u16_x3(__p0, __p1) __extension__ ({ \
- uint16x4x3_t __s1 = __p1; \
- uint16x4x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 17); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_s8_x3(__p0, __p1) __extension__ ({ \
- int8x8x3_t __s1 = __p1; \
- __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 0); \
- })
- #else
- #define vst1_s8_x3(__p0, __p1) __extension__ ({ \
- int8x8x3_t __s1 = __p1; \
- int8x8x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 0); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_f64_x3(__p0, __p1) __extension__ ({ \
- float64x1x3_t __s1 = __p1; \
- __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 10); \
- })
- #else
- #define vst1_f64_x3(__p0, __p1) __extension__ ({ \
- float64x1x3_t __s1 = __p1; \
- __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 10); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_f32_x3(__p0, __p1) __extension__ ({ \
- float32x2x3_t __s1 = __p1; \
- __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 9); \
- })
- #else
- #define vst1_f32_x3(__p0, __p1) __extension__ ({ \
- float32x2x3_t __s1 = __p1; \
- float32x2x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 9); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_f16_x3(__p0, __p1) __extension__ ({ \
- float16x4x3_t __s1 = __p1; \
- __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 8); \
- })
- #else
- #define vst1_f16_x3(__p0, __p1) __extension__ ({ \
- float16x4x3_t __s1 = __p1; \
- float16x4x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 8); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_s32_x3(__p0, __p1) __extension__ ({ \
- int32x2x3_t __s1 = __p1; \
- __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 2); \
- })
- #else
- #define vst1_s32_x3(__p0, __p1) __extension__ ({ \
- int32x2x3_t __s1 = __p1; \
- int32x2x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 2); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_s64_x3(__p0, __p1) __extension__ ({ \
- int64x1x3_t __s1 = __p1; \
- __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 3); \
- })
- #else
- #define vst1_s64_x3(__p0, __p1) __extension__ ({ \
- int64x1x3_t __s1 = __p1; \
- __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 3); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_s16_x3(__p0, __p1) __extension__ ({ \
- int16x4x3_t __s1 = __p1; \
- __builtin_neon_vst1_x3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 1); \
- })
- #else
- #define vst1_s16_x3(__p0, __p1) __extension__ ({ \
- int16x4x3_t __s1 = __p1; \
- int16x4x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __builtin_neon_vst1_x3_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 1); \
- })
- #endif
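
The _x3 macros follow the same pattern with three registers. A hedged sketch of the 64-bit (d-register) form, with names chosen only for the example:

#include <arm_neon.h>

/* vst1_f32_x3 stores three float32x2_t registers back to back,
 * i.e. six floats (24 bytes) of contiguous memory. */
void store_three_d_regs(float *dst, float32x2_t a, float32x2_t b, float32x2_t c) {
    float32x2x3_t trio;
    trio.val[0] = a;
    trio.val[1] = b;
    trio.val[2] = c;
    vst1_f32_x3(dst, trio);
}
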
- #ifdef __LITTLE_ENDIAN__
- #define vst1_p8_x4(__p0, __p1) __extension__ ({ \
- poly8x8x4_t __s1 = __p1; \
- __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 4); \
- })
- #else
- #define vst1_p8_x4(__p0, __p1) __extension__ ({ \
- poly8x8x4_t __s1 = __p1; \
- poly8x8x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 4); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_p64_x4(__p0, __p1) __extension__ ({ \
- poly64x1x4_t __s1 = __p1; \
- __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \
- })
- #else
- #define vst1_p64_x4(__p0, __p1) __extension__ ({ \
- poly64x1x4_t __s1 = __p1; \
- __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_p16_x4(__p0, __p1) __extension__ ({ \
- poly16x4x4_t __s1 = __p1; \
- __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 5); \
- })
- #else
- #define vst1_p16_x4(__p0, __p1) __extension__ ({ \
- poly16x4x4_t __s1 = __p1; \
- poly16x4x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
- __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 5); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_p8_x4(__p0, __p1) __extension__ ({ \
- poly8x16x4_t __s1 = __p1; \
- __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 36); \
- })
- #else
- #define vst1q_p8_x4(__p0, __p1) __extension__ ({ \
- poly8x16x4_t __s1 = __p1; \
- poly8x16x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 36); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_p64_x4(__p0, __p1) __extension__ ({ \
- poly64x2x4_t __s1 = __p1; \
- __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 38); \
- })
- #else
- #define vst1q_p64_x4(__p0, __p1) __extension__ ({ \
- poly64x2x4_t __s1 = __p1; \
- poly64x2x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
- __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 38); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_p16_x4(__p0, __p1) __extension__ ({ \
- poly16x8x4_t __s1 = __p1; \
- __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 37); \
- })
- #else
- #define vst1q_p16_x4(__p0, __p1) __extension__ ({ \
- poly16x8x4_t __s1 = __p1; \
- poly16x8x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 37); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_u8_x4(__p0, __p1) __extension__ ({ \
- uint8x16x4_t __s1 = __p1; \
- __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 48); \
- })
- #else
- #define vst1q_u8_x4(__p0, __p1) __extension__ ({ \
- uint8x16x4_t __s1 = __p1; \
- uint8x16x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 48); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_u32_x4(__p0, __p1) __extension__ ({ \
- uint32x4x4_t __s1 = __p1; \
- __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 50); \
- })
- #else
- #define vst1q_u32_x4(__p0, __p1) __extension__ ({ \
- uint32x4x4_t __s1 = __p1; \
- uint32x4x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
- __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 50); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_u64_x4(__p0, __p1) __extension__ ({ \
- uint64x2x4_t __s1 = __p1; \
- __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 51); \
- })
- #else
- #define vst1q_u64_x4(__p0, __p1) __extension__ ({ \
- uint64x2x4_t __s1 = __p1; \
- uint64x2x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
- __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 51); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_u16_x4(__p0, __p1) __extension__ ({ \
- uint16x8x4_t __s1 = __p1; \
- __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 49); \
- })
- #else
- #define vst1q_u16_x4(__p0, __p1) __extension__ ({ \
- uint16x8x4_t __s1 = __p1; \
- uint16x8x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 49); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_s8_x4(__p0, __p1) __extension__ ({ \
- int8x16x4_t __s1 = __p1; \
- __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 32); \
- })
- #else
- #define vst1q_s8_x4(__p0, __p1) __extension__ ({ \
- int8x16x4_t __s1 = __p1; \
- int8x16x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 32); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_f64_x4(__p0, __p1) __extension__ ({ \
- float64x2x4_t __s1 = __p1; \
- __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 42); \
- })
- #else
- #define vst1q_f64_x4(__p0, __p1) __extension__ ({ \
- float64x2x4_t __s1 = __p1; \
- float64x2x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
- __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 42); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_f32_x4(__p0, __p1) __extension__ ({ \
- float32x4x4_t __s1 = __p1; \
- __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 41); \
- })
- #else
- #define vst1q_f32_x4(__p0, __p1) __extension__ ({ \
- float32x4x4_t __s1 = __p1; \
- float32x4x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
- __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 41); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_f16_x4(__p0, __p1) __extension__ ({ \
- float16x8x4_t __s1 = __p1; \
- __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 40); \
- })
- #else
- #define vst1q_f16_x4(__p0, __p1) __extension__ ({ \
- float16x8x4_t __s1 = __p1; \
- float16x8x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 40); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_s32_x4(__p0, __p1) __extension__ ({ \
- int32x4x4_t __s1 = __p1; \
- __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 34); \
- })
- #else
- #define vst1q_s32_x4(__p0, __p1) __extension__ ({ \
- int32x4x4_t __s1 = __p1; \
- int32x4x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
- __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 34); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_s64_x4(__p0, __p1) __extension__ ({ \
- int64x2x4_t __s1 = __p1; \
- __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 35); \
- })
- #else
- #define vst1q_s64_x4(__p0, __p1) __extension__ ({ \
- int64x2x4_t __s1 = __p1; \
- int64x2x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
- __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 35); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1q_s16_x4(__p0, __p1) __extension__ ({ \
- int16x8x4_t __s1 = __p1; \
- __builtin_neon_vst1q_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 33); \
- })
- #else
- #define vst1q_s16_x4(__p0, __p1) __extension__ ({ \
- int16x8x4_t __s1 = __p1; \
- int16x8x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1q_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 33); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_u8_x4(__p0, __p1) __extension__ ({ \
- uint8x8x4_t __s1 = __p1; \
- __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 16); \
- })
- #else
- #define vst1_u8_x4(__p0, __p1) __extension__ ({ \
- uint8x8x4_t __s1 = __p1; \
- uint8x8x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 16); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_u32_x4(__p0, __p1) __extension__ ({ \
- uint32x2x4_t __s1 = __p1; \
- __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 18); \
- })
- #else
- #define vst1_u32_x4(__p0, __p1) __extension__ ({ \
- uint32x2x4_t __s1 = __p1; \
- uint32x2x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
- __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 18); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_u64_x4(__p0, __p1) __extension__ ({ \
- uint64x1x4_t __s1 = __p1; \
- __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \
- })
- #else
- #define vst1_u64_x4(__p0, __p1) __extension__ ({ \
- uint64x1x4_t __s1 = __p1; \
- __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_u16_x4(__p0, __p1) __extension__ ({ \
- uint16x4x4_t __s1 = __p1; \
- __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 17); \
- })
- #else
- #define vst1_u16_x4(__p0, __p1) __extension__ ({ \
- uint16x4x4_t __s1 = __p1; \
- uint16x4x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
- __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 17); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_s8_x4(__p0, __p1) __extension__ ({ \
- int8x8x4_t __s1 = __p1; \
- __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 0); \
- })
- #else
- #define vst1_s8_x4(__p0, __p1) __extension__ ({ \
- int8x8x4_t __s1 = __p1; \
- int8x8x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 0); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_f64_x4(__p0, __p1) __extension__ ({ \
- float64x1x4_t __s1 = __p1; \
- __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 10); \
- })
- #else
- #define vst1_f64_x4(__p0, __p1) __extension__ ({ \
- float64x1x4_t __s1 = __p1; \
- __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 10); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_f32_x4(__p0, __p1) __extension__ ({ \
- float32x2x4_t __s1 = __p1; \
- __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 9); \
- })
- #else
- #define vst1_f32_x4(__p0, __p1) __extension__ ({ \
- float32x2x4_t __s1 = __p1; \
- float32x2x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
- __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 9); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_f16_x4(__p0, __p1) __extension__ ({ \
- float16x4x4_t __s1 = __p1; \
- __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 8); \
- })
- #else
- #define vst1_f16_x4(__p0, __p1) __extension__ ({ \
- float16x4x4_t __s1 = __p1; \
- float16x4x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
- __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 8); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_s32_x4(__p0, __p1) __extension__ ({ \
- int32x2x4_t __s1 = __p1; \
- __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 2); \
- })
- #else
- #define vst1_s32_x4(__p0, __p1) __extension__ ({ \
- int32x2x4_t __s1 = __p1; \
- int32x2x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
- __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 2); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_s64_x4(__p0, __p1) __extension__ ({ \
- int64x1x4_t __s1 = __p1; \
- __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 3); \
- })
- #else
- #define vst1_s64_x4(__p0, __p1) __extension__ ({ \
- int64x1x4_t __s1 = __p1; \
- __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 3); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst1_s16_x4(__p0, __p1) __extension__ ({ \
- int16x4x4_t __s1 = __p1; \
- __builtin_neon_vst1_x4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 1); \
- })
- #else
- #define vst1_s16_x4(__p0, __p1) __extension__ ({ \
- int16x4x4_t __s1 = __p1; \
- int16x4x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
- __builtin_neon_vst1_x4_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 1); \
- })
- #endif
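
The _x4 macros complete the family with four registers. Note that on big-endian targets each macro first reverses the lanes of every register with __builtin_shufflevector, so the bytes that reach memory match what a little-endian build would produce. A usage sketch under the same assumptions as above:

#include <arm_neon.h>
#include <stdint.h>

/* vst1q_s16_x4 stores four int16x8_t registers, 64 bytes of
 * consecutive memory, regardless of target endianness. */
void store_four_q_regs(int16_t *dst, int16x8x4_t quad) {
    vst1q_s16_x4(dst, quad);
}
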
- #ifdef __LITTLE_ENDIAN__
- #define vst2_p64(__p0, __p1) __extension__ ({ \
- poly64x1x2_t __s1 = __p1; \
- __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \
- })
- #else
- #define vst2_p64(__p0, __p1) __extension__ ({ \
- poly64x1x2_t __s1 = __p1; \
- __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2q_p64(__p0, __p1) __extension__ ({ \
- poly64x2x2_t __s1 = __p1; \
- __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 38); \
- })
- #else
- #define vst2q_p64(__p0, __p1) __extension__ ({ \
- poly64x2x2_t __s1 = __p1; \
- poly64x2x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 38); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2q_u64(__p0, __p1) __extension__ ({ \
- uint64x2x2_t __s1 = __p1; \
- __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 51); \
- })
- #else
- #define vst2q_u64(__p0, __p1) __extension__ ({ \
- uint64x2x2_t __s1 = __p1; \
- uint64x2x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 51); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2q_f64(__p0, __p1) __extension__ ({ \
- float64x2x2_t __s1 = __p1; \
- __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 42); \
- })
- #else
- #define vst2q_f64(__p0, __p1) __extension__ ({ \
- float64x2x2_t __s1 = __p1; \
- float64x2x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 42); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2q_s64(__p0, __p1) __extension__ ({ \
- int64x2x2_t __s1 = __p1; \
- __builtin_neon_vst2q_v(__p0, __s1.val[0], __s1.val[1], 35); \
- })
- #else
- #define vst2q_s64(__p0, __p1) __extension__ ({ \
- int64x2x2_t __s1 = __p1; \
- int64x2x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __builtin_neon_vst2q_v(__p0, __rev1.val[0], __rev1.val[1], 35); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2_f64(__p0, __p1) __extension__ ({ \
- float64x1x2_t __s1 = __p1; \
- __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 10); \
- })
- #else
- #define vst2_f64(__p0, __p1) __extension__ ({ \
- float64x1x2_t __s1 = __p1; \
- __builtin_neon_vst2_v(__p0, __s1.val[0], __s1.val[1], 10); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2_lane_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x1x2_t __s1 = __p1; \
- __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \
- })
- #else
- #define vst2_lane_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x1x2_t __s1 = __p1; \
- __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x16x2_t __s1 = __p1; \
- __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 36); \
- })
- #else
- #define vst2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x16x2_t __s1 = __p1; \
- poly8x16x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x2x2_t __s1 = __p1; \
- __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 38); \
- })
- #else
- #define vst2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x2x2_t __s1 = __p1; \
- poly64x2x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x16x2_t __s1 = __p1; \
- __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \
- })
- #else
- #define vst2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x16x2_t __s1 = __p1; \
- uint8x16x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x2x2_t __s1 = __p1; \
- __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \
- })
- #else
- #define vst2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x2x2_t __s1 = __p1; \
- uint64x2x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x16x2_t __s1 = __p1; \
- __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \
- })
- #else
- #define vst2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x16x2_t __s1 = __p1; \
- int8x16x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x2x2_t __s1 = __p1; \
- __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 42); \
- })
- #else
- #define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x2x2_t __s1 = __p1; \
- float64x2x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 42); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x2x2_t __s1 = __p1; \
- __builtin_neon_vst2q_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 35); \
- })
- #else
- #define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x2x2_t __s1 = __p1; \
- int64x2x2_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __builtin_neon_vst2q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __p2, 35); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2_lane_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x1x2_t __s1 = __p1; \
- __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \
- })
- #else
- #define vst2_lane_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x1x2_t __s1 = __p1; \
- __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x1x2_t __s1 = __p1; \
- __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 10); \
- })
- #else
- #define vst2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x1x2_t __s1 = __p1; \
- __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 10); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x1x2_t __s1 = __p1; \
- __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 3); \
- })
- #else
- #define vst2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x1x2_t __s1 = __p1; \
- __builtin_neon_vst2_lane_v(__p0, __s1.val[0], __s1.val[1], __p2, 3); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3_p64(__p0, __p1) __extension__ ({ \
- poly64x1x3_t __s1 = __p1; \
- __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \
- })
- #else
- #define vst3_p64(__p0, __p1) __extension__ ({ \
- poly64x1x3_t __s1 = __p1; \
- __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3q_p64(__p0, __p1) __extension__ ({ \
- poly64x2x3_t __s1 = __p1; \
- __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 38); \
- })
- #else
- #define vst3q_p64(__p0, __p1) __extension__ ({ \
- poly64x2x3_t __s1 = __p1; \
- poly64x2x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 38); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3q_u64(__p0, __p1) __extension__ ({ \
- uint64x2x3_t __s1 = __p1; \
- __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 51); \
- })
- #else
- #define vst3q_u64(__p0, __p1) __extension__ ({ \
- uint64x2x3_t __s1 = __p1; \
- uint64x2x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 51); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3q_f64(__p0, __p1) __extension__ ({ \
- float64x2x3_t __s1 = __p1; \
- __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 42); \
- })
- #else
- #define vst3q_f64(__p0, __p1) __extension__ ({ \
- float64x2x3_t __s1 = __p1; \
- float64x2x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 42); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3q_s64(__p0, __p1) __extension__ ({ \
- int64x2x3_t __s1 = __p1; \
- __builtin_neon_vst3q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 35); \
- })
- #else
- #define vst3q_s64(__p0, __p1) __extension__ ({ \
- int64x2x3_t __s1 = __p1; \
- int64x2x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __builtin_neon_vst3q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], 35); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3_f64(__p0, __p1) __extension__ ({ \
- float64x1x3_t __s1 = __p1; \
- __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 10); \
- })
- #else
- #define vst3_f64(__p0, __p1) __extension__ ({ \
- float64x1x3_t __s1 = __p1; \
- __builtin_neon_vst3_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], 10); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3_lane_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x1x3_t __s1 = __p1; \
- __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \
- })
- #else
- #define vst3_lane_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x1x3_t __s1 = __p1; \
- __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x16x3_t __s1 = __p1; \
- __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \
- })
- #else
- #define vst3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x16x3_t __s1 = __p1; \
- poly8x16x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x2x3_t __s1 = __p1; \
- __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \
- })
- #else
- #define vst3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x2x3_t __s1 = __p1; \
- poly64x2x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 38); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x16x3_t __s1 = __p1; \
- __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \
- })
- #else
- #define vst3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x16x3_t __s1 = __p1; \
- uint8x16x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x2x3_t __s1 = __p1; \
- __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \
- })
- #else
- #define vst3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x2x3_t __s1 = __p1; \
- uint64x2x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x16x3_t __s1 = __p1; \
- __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \
- })
- #else
- #define vst3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x16x3_t __s1 = __p1; \
- int8x16x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x2x3_t __s1 = __p1; \
- __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 42); \
- })
- #else
- #define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x2x3_t __s1 = __p1; \
- float64x2x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 42); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x2x3_t __s1 = __p1; \
- __builtin_neon_vst3q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 35); \
- })
- #else
- #define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x2x3_t __s1 = __p1; \
- int64x2x3_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __builtin_neon_vst3q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __p2, 35); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3_lane_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x1x3_t __s1 = __p1; \
- __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \
- })
- #else
- #define vst3_lane_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x1x3_t __s1 = __p1; \
- __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x1x3_t __s1 = __p1; \
- __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 10); \
- })
- #else
- #define vst3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x1x3_t __s1 = __p1; \
- __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 10); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x1x3_t __s1 = __p1; \
- __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 3); \
- })
- #else
- #define vst3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x1x3_t __s1 = __p1; \
- __builtin_neon_vst3_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __p2, 3); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4_p64(__p0, __p1) __extension__ ({ \
- poly64x1x4_t __s1 = __p1; \
- __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \
- })
- #else
- #define vst4_p64(__p0, __p1) __extension__ ({ \
- poly64x1x4_t __s1 = __p1; \
- __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4q_p64(__p0, __p1) __extension__ ({ \
- poly64x2x4_t __s1 = __p1; \
- __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 38); \
- })
- #else
- #define vst4q_p64(__p0, __p1) __extension__ ({ \
- poly64x2x4_t __s1 = __p1; \
- poly64x2x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
- __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 38); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4q_u64(__p0, __p1) __extension__ ({ \
- uint64x2x4_t __s1 = __p1; \
- __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 51); \
- })
- #else
- #define vst4q_u64(__p0, __p1) __extension__ ({ \
- uint64x2x4_t __s1 = __p1; \
- uint64x2x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
- __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 51); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4q_f64(__p0, __p1) __extension__ ({ \
- float64x2x4_t __s1 = __p1; \
- __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 42); \
- })
- #else
- #define vst4q_f64(__p0, __p1) __extension__ ({ \
- float64x2x4_t __s1 = __p1; \
- float64x2x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
- __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 42); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4q_s64(__p0, __p1) __extension__ ({ \
- int64x2x4_t __s1 = __p1; \
- __builtin_neon_vst4q_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 35); \
- })
- #else
- #define vst4q_s64(__p0, __p1) __extension__ ({ \
- int64x2x4_t __s1 = __p1; \
- int64x2x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
- __builtin_neon_vst4q_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], 35); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4_f64(__p0, __p1) __extension__ ({ \
- float64x1x4_t __s1 = __p1; \
- __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 10); \
- })
- #else
- #define vst4_f64(__p0, __p1) __extension__ ({ \
- float64x1x4_t __s1 = __p1; \
- __builtin_neon_vst4_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], 10); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4_lane_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x1x4_t __s1 = __p1; \
- __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \
- })
- #else
- #define vst4_lane_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x1x4_t __s1 = __p1; \
- __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x16x4_t __s1 = __p1; \
- __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \
- })
- #else
- #define vst4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
- poly8x16x4_t __s1 = __p1; \
- poly8x16x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 36); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x2x4_t __s1 = __p1; \
- __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \
- })
- #else
- #define vst4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
- poly64x2x4_t __s1 = __p1; \
- poly64x2x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
- __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 38); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x16x4_t __s1 = __p1; \
- __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \
- })
- #else
- #define vst4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
- uint8x16x4_t __s1 = __p1; \
- uint8x16x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x2x4_t __s1 = __p1; \
- __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \
- })
- #else
- #define vst4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x2x4_t __s1 = __p1; \
- uint64x2x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
- __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x16x4_t __s1 = __p1; \
- __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 32); \
- })
- #else
- #define vst4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
- int8x16x4_t __s1 = __p1; \
- int8x16x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
- __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 32); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x2x4_t __s1 = __p1; \
- __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 42); \
- })
- #else
- #define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x2x4_t __s1 = __p1; \
- float64x2x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
- __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 42); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x2x4_t __s1 = __p1; \
- __builtin_neon_vst4q_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 35); \
- })
- #else
- #define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x2x4_t __s1 = __p1; \
- int64x2x4_t __rev1; \
- __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
- __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
- __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
- __builtin_neon_vst4q_lane_v(__p0, __rev1.val[0], __rev1.val[1], __rev1.val[2], __rev1.val[3], __p2, 35); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4_lane_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x1x4_t __s1 = __p1; \
- __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \
- })
- #else
- #define vst4_lane_u64(__p0, __p1, __p2) __extension__ ({ \
- uint64x1x4_t __s1 = __p1; \
- __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x1x4_t __s1 = __p1; \
- __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 10); \
- })
- #else
- #define vst4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
- float64x1x4_t __s1 = __p1; \
- __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 10); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vst4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x1x4_t __s1 = __p1; \
- __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 3); \
- })
- #else
- #define vst4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
- int64x1x4_t __s1 = __p1; \
- __builtin_neon_vst4_lane_v(__p0, __s1.val[0], __s1.val[1], __s1.val[2], __s1.val[3], __p2, 3); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vstrq_p128(__p0, __p1) __extension__ ({ \
- poly128_t __s1 = __p1; \
- __builtin_neon_vstrq_p128(__p0, __s1); \
- })
- #else
- #define vstrq_p128(__p0, __p1) __extension__ ({ \
- poly128_t __s1 = __p1; \
- __builtin_neon_vstrq_p128(__p0, __s1); \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64_t vsubd_u64(uint64_t __p0, uint64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vsubd_u64(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint64_t vsubd_u64(uint64_t __p0, uint64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vsubd_u64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64_t vsubd_s64(int64_t __p0, int64_t __p1) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vsubd_s64(__p0, __p1);
- return __ret;
- }
- #else
- __ai int64_t vsubd_s64(int64_t __p0, int64_t __p1) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vsubd_s64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __ret;
- __ret = __p0 - __p1;
- return __ret;
- }
- #else
- __ai float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float64x2_t __ret;
- __ret = __rev0 - __rev1;
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x1_t vsub_f64(float64x1_t __p0, float64x1_t __p1) {
- float64x1_t __ret;
- __ret = __p0 - __p1;
- return __ret;
- }
- #else
- __ai float64x1_t vsub_f64(float64x1_t __p0, float64x1_t __p1) {
- float64x1_t __ret;
- __ret = __p0 - __p1;
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
- uint16x8_t __ret;
- __ret = vcombine_u16(__p0, vsubhn_u32(__p1, __p2));
- return __ret;
- }
- #else
- __ai uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __noswap_vcombine_u16(__rev0, __noswap_vsubhn_u32(__rev1, __rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
- uint32x4_t __ret;
- __ret = vcombine_u32(__p0, vsubhn_u64(__p1, __p2));
- return __ret;
- }
- #else
- __ai uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
- uint32x4_t __ret;
- __ret = __noswap_vcombine_u32(__rev0, __noswap_vsubhn_u64(__rev1, __rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
- uint8x16_t __ret;
- __ret = vcombine_u8(__p0, vsubhn_u16(__p1, __p2));
- return __ret;
- }
- #else
- __ai uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = __noswap_vcombine_u8(__rev0, __noswap_vsubhn_u16(__rev1, __rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
- int16x8_t __ret;
- __ret = vcombine_s16(__p0, vsubhn_s32(__p1, __p2));
- return __ret;
- }
- #else
- __ai int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __noswap_vcombine_s16(__rev0, __noswap_vsubhn_s32(__rev1, __rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
- int32x4_t __ret;
- __ret = vcombine_s32(__p0, vsubhn_s64(__p1, __p2));
- return __ret;
- }
- #else
- __ai int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
- int32x4_t __ret;
- __ret = __noswap_vcombine_s32(__rev0, __noswap_vsubhn_s64(__rev1, __rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
- int8x16_t __ret;
- __ret = vcombine_s8(__p0, vsubhn_s16(__p1, __p2));
- return __ret;
- }
- #else
- __ai int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = __noswap_vcombine_s8(__rev0, __noswap_vsubhn_s16(__rev1, __rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint16x8_t __ret;
- __ret = vmovl_high_u8(__p0) - vmovl_high_u8(__p1);
- return __ret;
- }
- #else
- __ai uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __noswap_vmovl_high_u8(__rev0) - __noswap_vmovl_high_u8(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint64x2_t __ret;
- __ret = vmovl_high_u32(__p0) - vmovl_high_u32(__p1);
- return __ret;
- }
- #else
- __ai uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint64x2_t __ret;
- __ret = __noswap_vmovl_high_u32(__rev0) - __noswap_vmovl_high_u32(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint32x4_t __ret;
- __ret = vmovl_high_u16(__p0) - vmovl_high_u16(__p1);
- return __ret;
- }
- #else
- __ai uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __noswap_vmovl_high_u16(__rev0) - __noswap_vmovl_high_u16(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) {
- int16x8_t __ret;
- __ret = vmovl_high_s8(__p0) - vmovl_high_s8(__p1);
- return __ret;
- }
- #else
- __ai int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __noswap_vmovl_high_s8(__rev0) - __noswap_vmovl_high_s8(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) {
- int64x2_t __ret;
- __ret = vmovl_high_s32(__p0) - vmovl_high_s32(__p1);
- return __ret;
- }
- #else
- __ai int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int64x2_t __ret;
- __ret = __noswap_vmovl_high_s32(__rev0) - __noswap_vmovl_high_s32(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) {
- int32x4_t __ret;
- __ret = vmovl_high_s16(__p0) - vmovl_high_s16(__p1);
- return __ret;
- }
- #else
- __ai int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __noswap_vmovl_high_s16(__rev0) - __noswap_vmovl_high_s16(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
- uint16x8_t __ret;
- __ret = __p0 - vmovl_high_u8(__p1);
- return __ret;
- }
- #else
- __ai uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __rev0 - __noswap_vmovl_high_u8(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
- uint64x2_t __ret;
- __ret = __p0 - vmovl_high_u32(__p1);
- return __ret;
- }
- #else
- __ai uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint64x2_t __ret;
- __ret = __rev0 - __noswap_vmovl_high_u32(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
- uint32x4_t __ret;
- __ret = __p0 - vmovl_high_u16(__p1);
- return __ret;
- }
- #else
- __ai uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __rev0 - __noswap_vmovl_high_u16(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) {
- int16x8_t __ret;
- __ret = __p0 - vmovl_high_s8(__p1);
- return __ret;
- }
- #else
- __ai int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __rev0 - __noswap_vmovl_high_s8(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) {
- int64x2_t __ret;
- __ret = __p0 - vmovl_high_s32(__p1);
- return __ret;
- }
- #else
- __ai int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int64x2_t __ret;
- __ret = __rev0 - __noswap_vmovl_high_s32(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) {
- int32x4_t __ret;
- __ret = __p0 - vmovl_high_s16(__p1);
- return __ret;
- }
- #else
- __ai int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __rev0 - __noswap_vmovl_high_s16(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) {
- poly8x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
- return __ret;
- }
- #else
- __ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) {
- poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) {
- poly16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
- return __ret;
- }
- #else
- __ai poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) {
- poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- poly16x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
- poly8x16_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
- return __ret;
- }
- #else
- __ai poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
- poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
- poly64x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
- return __ret;
- }
- #else
- __ai poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
- poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- poly64x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
- poly16x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
- return __ret;
- }
- #else
- __ai poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
- poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- poly16x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
- return __ret;
- }
- #else
- __ai uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
- return __ret;
- }
- #else
- __ai uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
- return __ret;
- }
- #else
- __ai uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
- return __ret;
- }
- #else
- __ai uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
- return __ret;
- }
- #else
- __ai int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
- return __ret;
- }
- #else
- __ai float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float64x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
- return __ret;
- }
- #else
- __ai float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
- return __ret;
- }
- #else
- __ai int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
- return __ret;
- }
- #else
- __ai int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int64x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
- return __ret;
- }
- #else
- __ai int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
- return __ret;
- }
- #else
- __ai uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
- return __ret;
- }
- #else
- __ai uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
- return __ret;
- }
- #else
- __ai uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
- return __ret;
- }
- #else
- __ai int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
- return __ret;
- }
- #else
- __ai float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float32x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
- return __ret;
- }
- #else
- __ai int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
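- /* vtrn2 below mirrors vtrn1 above: where vtrn1 interleaves the even-numbered
-  * lanes of its two operands, vtrn2 interleaves the odd-numbered lanes, e.g.
-  * (illustrative only, values listed in lane order):
-  *   vtrn1_u32({a0,a1}, {b0,b1}) -> {a0,b0}
-  *   vtrn2_u32({a0,a1}, {b0,b1}) -> {a1,b1}
-  * The shuffle masks (1, 9, 3, 11, ... for eight lanes) encode exactly that. */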
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) {
- poly8x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
- return __ret;
- }
- #else
- __ai poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) {
- poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) {
- poly16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
- return __ret;
- }
- #else
- __ai poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) {
- poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- poly16x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
- poly8x16_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
- return __ret;
- }
- #else
- __ai poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
- poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
- poly64x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
- return __ret;
- }
- #else
- __ai poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
- poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- poly64x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
- poly16x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
- return __ret;
- }
- #else
- __ai poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
- poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- poly16x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
- return __ret;
- }
- #else
- __ai uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
- return __ret;
- }
- #else
- __ai uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
- return __ret;
- }
- #else
- __ai uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
- return __ret;
- }
- #else
- __ai uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
- return __ret;
- }
- #else
- __ai int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
- return __ret;
- }
- #else
- __ai float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float64x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
- return __ret;
- }
- #else
- __ai float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
- return __ret;
- }
- #else
- __ai int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
- return __ret;
- }
- #else
- __ai int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int64x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
- return __ret;
- }
- #else
- __ai int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
- return __ret;
- }
- #else
- __ai uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
- return __ret;
- }
- #else
- __ai uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
- return __ret;
- }
- #else
- __ai uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
- return __ret;
- }
- #else
- __ai int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
- return __ret;
- }
- #else
- __ai float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float32x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
- return __ret;
- }
- #else
- __ai int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
- return __ret;
- }
- #else
- __ai int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
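- /* vtst / vtstq / vtstd below are the 64-bit "test bits" forms: each result lane
-  * is all ones when the bitwise AND of the corresponding input lanes is non-zero
-  * and all zeros otherwise, e.g. (illustrative only):
-  *   vtst_u64({0x0F}, {0xF0}) -> {0x0000000000000000}
-  *   vtst_u64({0x0F}, {0x01}) -> {0xFFFFFFFFFFFFFFFF}
-  * The single-lane and scalar forms have identical branches in the #ifdef, since
-  * there is nothing to lane-reverse, and the trailing constant passed to the
-  * __builtin_neon_* calls (19, 51, ...) appears to be an internal type code. */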
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vtst_p64(poly64x1_t __p0, poly64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
- return __ret;
- }
- #else
- __ai uint64x1_t vtst_p64(poly64x1_t __p0, poly64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
- return __ret;
- }
- #else
- __ai uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) {
- poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
- return __ret;
- }
- #else
- __ai uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) {
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
- return __ret;
- }
- #else
- __ai uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vtst_u64(uint64x1_t __p0, uint64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
- return __ret;
- }
- #else
- __ai uint64x1_t vtst_u64(uint64x1_t __p0, uint64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x1_t vtst_s64(int64x1_t __p0, int64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
- return __ret;
- }
- #else
- __ai uint64x1_t vtst_s64(int64x1_t __p0, int64x1_t __p1) {
- uint64x1_t __ret;
- __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64_t vtstd_u64(uint64_t __p0, uint64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vtstd_u64(__p0, __p1);
- return __ret;
- }
- #else
- __ai uint64_t vtstd_u64(uint64_t __p0, uint64_t __p1) {
- uint64_t __ret;
- __ret = (uint64_t) __builtin_neon_vtstd_u64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64_t vtstd_s64(int64_t __p0, int64_t __p1) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vtstd_s64(__p0, __p1);
- return __ret;
- }
- #else
- __ai int64_t vtstd_s64(int64_t __p0, int64_t __p1) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vtstd_s64(__p0, __p1);
- return __ret;
- }
- #endif
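- /* vuqadd* below is the saturating accumulate family that, as far as the name
-  * mapping goes, corresponds to SUQADD on AArch64: the second operand is treated
-  * as unsigned, added to the first, and the result saturates to the signed range
-  * of the element type. Scalar forms (vuqaddb/h/s/d) come first, vector forms
-  * follow. */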
- #ifdef __LITTLE_ENDIAN__
- __ai int8_t vuqaddb_s8(int8_t __p0, int8_t __p1) {
- int8_t __ret;
- __ret = (int8_t) __builtin_neon_vuqaddb_s8(__p0, __p1);
- return __ret;
- }
- #else
- __ai int8_t vuqaddb_s8(int8_t __p0, int8_t __p1) {
- int8_t __ret;
- __ret = (int8_t) __builtin_neon_vuqaddb_s8(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32_t vuqadds_s32(int32_t __p0, int32_t __p1) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vuqadds_s32(__p0, __p1);
- return __ret;
- }
- #else
- __ai int32_t vuqadds_s32(int32_t __p0, int32_t __p1) {
- int32_t __ret;
- __ret = (int32_t) __builtin_neon_vuqadds_s32(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64_t vuqaddd_s64(int64_t __p0, int64_t __p1) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vuqaddd_s64(__p0, __p1);
- return __ret;
- }
- #else
- __ai int64_t vuqaddd_s64(int64_t __p0, int64_t __p1) {
- int64_t __ret;
- __ret = (int64_t) __builtin_neon_vuqaddd_s64(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16_t vuqaddh_s16(int16_t __p0, int16_t __p1) {
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vuqaddh_s16(__p0, __p1);
- return __ret;
- }
- #else
- __ai int16_t vuqaddh_s16(int16_t __p0, int16_t __p1) {
- int16_t __ret;
- __ret = (int16_t) __builtin_neon_vuqaddh_s16(__p0, __p1);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vuqaddq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
- return __ret;
- }
- #else
- __ai int8x16_t vuqaddq_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = (int8x16_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vuqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
- return __ret;
- }
- #else
- __ai int32x4_t vuqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vuqaddq_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
- return __ret;
- }
- #else
- __ai int64x2_t vuqaddq_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int64x2_t __ret;
- __ret = (int64x2_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vuqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
- return __ret;
- }
- #else
- __ai int16x8_t vuqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = (int16x8_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vuqadd_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
- return __ret;
- }
- #else
- __ai int8x8_t vuqadd_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = (int8x8_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vuqadd_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vuqadd_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __ret;
- __ret = (int32x2_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x1_t vuqadd_s64(int64x1_t __p0, int64x1_t __p1) {
- int64x1_t __ret;
- __ret = (int64x1_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
- return __ret;
- }
- #else
- __ai int64x1_t vuqadd_s64(int64x1_t __p0, int64x1_t __p1) {
- int64x1_t __ret;
- __ret = (int64x1_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vuqadd_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
- return __ret;
- }
- #else
- __ai int16x4_t vuqadd_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = (int16x4_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
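- /* vuzp1 below de-interleaves: it keeps the even-numbered lanes of the
-  * concatenation of its two operands, e.g. (illustrative only):
-  *   vuzp1_u16({a0,a1,a2,a3}, {b0,b1,b2,b3}) -> {a0,a2,b0,b2}
-  * which is what the 0, 2, 4, 6, ... shuffle masks spell out. */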
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) {
- poly8x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
- return __ret;
- }
- #else
- __ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) {
- poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) {
- poly16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
- return __ret;
- }
- #else
- __ai poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) {
- poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- poly16x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
- poly8x16_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
- return __ret;
- }
- #else
- __ai poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
- poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
- poly64x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
- return __ret;
- }
- #else
- __ai poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
- poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- poly64x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
- poly16x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
- return __ret;
- }
- #else
- __ai poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
- poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- poly16x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
- return __ret;
- }
- #else
- __ai uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
- return __ret;
- }
- #else
- __ai uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
- return __ret;
- }
- #else
- __ai uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
- return __ret;
- }
- #else
- __ai uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
- return __ret;
- }
- #else
- __ai int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
- return __ret;
- }
- #else
- __ai float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float64x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
- return __ret;
- }
- #else
- __ai float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
- return __ret;
- }
- #else
- __ai int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
- return __ret;
- }
- #else
- __ai int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int64x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
- return __ret;
- }
- #else
- __ai int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
- return __ret;
- }
- #else
- __ai uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
- return __ret;
- }
- #else
- __ai uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
- return __ret;
- }
- #else
- __ai uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
- return __ret;
- }
- #else
- __ai int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
- return __ret;
- }
- #else
- __ai float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float32x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
- return __ret;
- }
- #else
- __ai int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
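- /* vuzp2 below is the odd-lane counterpart of vuzp1 above, e.g. (illustrative
-  * only):
-  *   vuzp2_u16({a0,a1,a2,a3}, {b0,b1,b2,b3}) -> {a1,a3,b1,b3} */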
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) {
- poly8x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
- return __ret;
- }
- #else
- __ai poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) {
- poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) {
- poly16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
- return __ret;
- }
- #else
- __ai poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) {
- poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- poly16x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
- poly8x16_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
- return __ret;
- }
- #else
- __ai poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
- poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
- poly64x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
- return __ret;
- }
- #else
- __ai poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
- poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- poly64x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
- poly16x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
- return __ret;
- }
- #else
- __ai poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
- poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- poly16x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
- return __ret;
- }
- #else
- __ai uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
- return __ret;
- }
- #else
- __ai uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
- return __ret;
- }
- #else
- __ai uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
- return __ret;
- }
- #else
- __ai uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
- return __ret;
- }
- #else
- __ai int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
- return __ret;
- }
- #else
- __ai float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float64x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
- return __ret;
- }
- #else
- __ai float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
- return __ret;
- }
- #else
- __ai int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
- return __ret;
- }
- #else
- __ai int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int64x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
- return __ret;
- }
- #else
- __ai int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
- return __ret;
- }
- #else
- __ai uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
- return __ret;
- }
- #else
- __ai uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
- return __ret;
- }
- #else
- __ai uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
- return __ret;
- }
- #else
- __ai int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
- return __ret;
- }
- #else
- __ai float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float32x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
- return __ret;
- }
- #else
- __ai int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
- return __ret;
- }
- #else
- __ai int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
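- /* vzip1 below interleaves the low halves of its two operands lane by lane,
-  * e.g. (illustrative only):
-  *   vzip1_u16({a0,a1,a2,a3}, {b0,b1,b2,b3}) -> {a0,b0,a1,b1}
-  * (vzip2, defined after these, does the same with the high halves). */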
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) {
- poly8x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
- return __ret;
- }
- #else
- __ai poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) {
- poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) {
- poly16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
- return __ret;
- }
- #else
- __ai poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) {
- poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- poly16x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
- poly8x16_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
- return __ret;
- }
- #else
- __ai poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
- poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
- poly64x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
- return __ret;
- }
- #else
- __ai poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
- poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- poly64x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
- poly16x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
- return __ret;
- }
- #else
- __ai poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
- poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- poly16x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
- return __ret;
- }
- #else
- __ai uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
- return __ret;
- }
- #else
- __ai uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
- return __ret;
- }
- #else
- __ai uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
- return __ret;
- }
- #else
- __ai uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
- return __ret;
- }
- #else
- __ai int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
- return __ret;
- }
- #else
- __ai float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float64x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
- return __ret;
- }
- #else
- __ai float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
- return __ret;
- }
- #else
- __ai int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
- return __ret;
- }
- #else
- __ai int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int64x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
- return __ret;
- }
- #else
- __ai int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
- return __ret;
- }
- #else
- __ai uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
- return __ret;
- }
- #else
- __ai uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
- return __ret;
- }
- #else
- __ai uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
- return __ret;
- }
- #else
- __ai int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
- return __ret;
- }
- #else
- __ai float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float32x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
- return __ret;
- }
- #else
- __ai int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
- return __ret;
- }
- #else
- __ai int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) {
- poly8x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
- return __ret;
- }
- #else
- __ai poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) {
- poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) {
- poly16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
- return __ret;
- }
- #else
- __ai poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) {
- poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- poly16x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
- poly8x16_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
- return __ret;
- }
- #else
- __ai poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
- poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- poly8x16_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
- poly64x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
- return __ret;
- }
- #else
- __ai poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
- poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- poly64x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
- poly16x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
- return __ret;
- }
- #else
- __ai poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
- poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- poly16x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
- return __ret;
- }
- #else
- __ai uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
- return __ret;
- }
- #else
- __ai uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
- return __ret;
- }
- #else
- __ai uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
- return __ret;
- }
- #else
- __ai uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
- return __ret;
- }
- #else
- __ai int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
- return __ret;
- }
- #else
- __ai float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) {
- float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float64x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
- return __ret;
- }
- #else
- __ai float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) {
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- float32x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
- return __ret;
- }
- #else
- __ai int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
- return __ret;
- }
- #else
- __ai int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int64x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
- return __ret;
- }
- #else
- __ai int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
- return __ret;
- }
- #else
- __ai uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
- return __ret;
- }
- #else
- __ai uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
- return __ret;
- }
- #else
- __ai uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
- return __ret;
- }
- #else
- __ai int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
- return __ret;
- }
- #else
- __ai float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) {
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- float32x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
- return __ret;
- }
- #else
- __ai int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __ret;
- __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
- return __ret;
- }
- #else
- __ai int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #endif
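Note: the hunk above covers the vzip1*/vzip2* interleave intrinsics. Each one is emitted twice: a little-endian body that performs the interleave shuffle directly, and a big-endian body that lane-reverses its inputs, applies the same shuffle, and reverses the result back. A minimal usage sketch, assuming an AArch64 target with <arm_neon.h> available (the helper name zip_bytes is illustrative, not part of the header):

#include <arm_neon.h>
/* Interleave two byte vectors:
 *   vzip1_u8 -> a0,b0,a1,b1,a2,b2,a3,b3  (low halves)
 *   vzip2_u8 -> a4,b4,a5,b5,a6,b6,a7,b7  (high halves) */
void zip_bytes(uint8x8_t a, uint8x8_t b, uint8x8_t *lo, uint8x8_t *hi) {
  *lo = vzip1_u8(a, b);
  *hi = vzip2_u8(a, b);
}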
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
- uint8x16_t __ret;
- __ret = __p0 + vabdq_u8(__p1, __p2);
- return __ret;
- }
- #else
- __ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __ret;
- __ret = __rev0 + __noswap_vabdq_u8(__rev1, __rev2);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
- uint32x4_t __ret;
- __ret = __p0 + vabdq_u32(__p1, __p2);
- return __ret;
- }
- #else
- __ai uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __rev0 + __noswap_vabdq_u32(__rev1, __rev2);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
- uint16x8_t __ret;
- __ret = __p0 + vabdq_u16(__p1, __p2);
- return __ret;
- }
- #else
- __ai uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __rev0 + __noswap_vabdq_u16(__rev1, __rev2);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
- int8x16_t __ret;
- __ret = __p0 + vabdq_s8(__p1, __p2);
- return __ret;
- }
- #else
- __ai int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __ret;
- __ret = __rev0 + __noswap_vabdq_s8(__rev1, __rev2);
- __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
- int32x4_t __ret;
- __ret = __p0 + vabdq_s32(__p1, __p2);
- return __ret;
- }
- #else
- __ai int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __rev0 + __noswap_vabdq_s32(__rev1, __rev2);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
- int16x8_t __ret;
- __ret = __p0 + vabdq_s16(__p1, __p2);
- return __ret;
- }
- #else
- __ai int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __rev0 + __noswap_vabdq_s16(__rev1, __rev2);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
- uint8x8_t __ret;
- __ret = __p0 + vabd_u8(__p1, __p2);
- return __ret;
- }
- #else
- __ai uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __ret;
- __ret = __rev0 + __noswap_vabd_u8(__rev1, __rev2);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
- uint32x2_t __ret;
- __ret = __p0 + vabd_u32(__p1, __p2);
- return __ret;
- }
- #else
- __ai uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
- uint32x2_t __ret;
- __ret = __rev0 + __noswap_vabd_u32(__rev1, __rev2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
- uint16x4_t __ret;
- __ret = __p0 + vabd_u16(__p1, __p2);
- return __ret;
- }
- #else
- __ai uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- uint16x4_t __ret;
- __ret = __rev0 + __noswap_vabd_u16(__rev1, __rev2);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
- int8x8_t __ret;
- __ret = __p0 + vabd_s8(__p1, __p2);
- return __ret;
- }
- #else
- __ai int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __ret;
- __ret = __rev0 + __noswap_vabd_s8(__rev1, __rev2);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
- int32x2_t __ret;
- __ret = __p0 + vabd_s32(__p1, __p2);
- return __ret;
- }
- #else
- __ai int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
- int32x2_t __ret;
- __ret = __rev0 + __noswap_vabd_s32(__rev1, __rev2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
- int16x4_t __ret;
- __ret = __p0 + vabd_s16(__p1, __p2);
- return __ret;
- }
- #else
- __ai int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- int16x4_t __ret;
- __ret = __rev0 + __noswap_vabd_s16(__rev1, __rev2);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
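The vaba*/vabaq* block above is absolute-difference-and-accumulate: each result lane is the accumulator lane plus |__p1 - __p2|, built on the corresponding vabd*/vabdq* intrinsic. A hedged sketch of one accumulation step (sad_step is an illustrative name, not from the header):

#include <arm_neon.h>
/* One accumulation step: acc[i] += |a[i] - b[i]| for all 16 byte lanes. */
uint8x16_t sad_step(uint8x16_t acc, uint8x16_t a, uint8x16_t b) {
  return vabaq_u8(acc, a, b);
}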
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(vmovl_u8((uint8x8_t)(vabd_u8(__p0, __p1))));
- return __ret;
- }
- #else
- __ai uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_u8(__rev0, __rev1))));
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai uint16x8_t __noswap_vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint16x8_t __ret;
- __ret = (uint16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_u8(__p0, __p1))));
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(vmovl_u32((uint32x2_t)(vabd_u32(__p0, __p1))));
- return __ret;
- }
- #else
- __ai uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_u32(__rev0, __rev1))));
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai uint64x2_t __noswap_vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint64x2_t __ret;
- __ret = (uint64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_u32(__p0, __p1))));
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(vmovl_u16((uint16x4_t)(vabd_u16(__p0, __p1))));
- return __ret;
- }
- #else
- __ai uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_u16(__rev0, __rev1))));
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai uint32x4_t __noswap_vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint32x4_t __ret;
- __ret = (uint32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_u16(__p0, __p1))));
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) {
- int16x8_t __ret;
- __ret = (int16x8_t)(vmovl_u8((uint8x8_t)(vabd_s8(__p0, __p1))));
- return __ret;
- }
- #else
- __ai int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = (int16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_s8(__rev0, __rev1))));
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai int16x8_t __noswap_vabdl_s8(int8x8_t __p0, int8x8_t __p1) {
- int16x8_t __ret;
- __ret = (int16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_s8(__p0, __p1))));
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) {
- int64x2_t __ret;
- __ret = (int64x2_t)(vmovl_u32((uint32x2_t)(vabd_s32(__p0, __p1))));
- return __ret;
- }
- #else
- __ai int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int64x2_t __ret;
- __ret = (int64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_s32(__rev0, __rev1))));
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai int64x2_t __noswap_vabdl_s32(int32x2_t __p0, int32x2_t __p1) {
- int64x2_t __ret;
- __ret = (int64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_s32(__p0, __p1))));
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) {
- int32x4_t __ret;
- __ret = (int32x4_t)(vmovl_u16((uint16x4_t)(vabd_s16(__p0, __p1))));
- return __ret;
- }
- #else
- __ai int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = (int32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_s16(__rev0, __rev1))));
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai int32x4_t __noswap_vabdl_s16(int16x4_t __p0, int16x4_t __p1) {
- int32x4_t __ret;
- __ret = (int32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_s16(__p0, __p1))));
- return __ret;
- }
- #endif
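vabdl_* above is the widening form: the absolute difference is computed with vabd_* and then extended to the next lane width via vmovl_*. The extra __noswap_vabdl_* definitions in the big-endian branch take already lane-reversed inputs, apparently so other big-endian bodies can compose them without swapping twice. Minimal sketch (abs_diff_wide is an illustrative name):

#include <arm_neon.h>
/* Widening absolute difference: each |a[i] - b[i]| comes back as a 16-bit lane,
 * so the result cannot wrap. */
uint16x8_t abs_diff_wide(uint8x8_t a, uint8x8_t b) {
  return vabdl_u8(a, b);
}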
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint16x8_t __ret;
- __ret = vmovl_u8(__p0) + vmovl_u8(__p1);
- return __ret;
- }
- #else
- __ai uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) {
- uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __noswap_vmovl_u8(__rev0) + __noswap_vmovl_u8(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint64x2_t __ret;
- __ret = vmovl_u32(__p0) + vmovl_u32(__p1);
- return __ret;
- }
- #else
- __ai uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) {
- uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = __noswap_vmovl_u32(__rev0) + __noswap_vmovl_u32(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint32x4_t __ret;
- __ret = vmovl_u16(__p0) + vmovl_u16(__p1);
- return __ret;
- }
- #else
- __ai uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) {
- uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __noswap_vmovl_u16(__rev0) + __noswap_vmovl_u16(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) {
- int16x8_t __ret;
- __ret = vmovl_s8(__p0) + vmovl_s8(__p1);
- return __ret;
- }
- #else
- __ai int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) {
- int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __noswap_vmovl_s8(__rev0) + __noswap_vmovl_s8(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) {
- int64x2_t __ret;
- __ret = vmovl_s32(__p0) + vmovl_s32(__p1);
- return __ret;
- }
- #else
- __ai int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) {
- int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int64x2_t __ret;
- __ret = __noswap_vmovl_s32(__rev0) + __noswap_vmovl_s32(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) {
- int32x4_t __ret;
- __ret = vmovl_s16(__p0) + vmovl_s16(__p1);
- return __ret;
- }
- #else
- __ai int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) {
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __noswap_vmovl_s16(__rev0) + __noswap_vmovl_s16(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
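vaddl_* widens both operands with vmovl_* before adding, so the sum is formed at double the lane width. Sketch (add_wide is an illustrative name):

#include <arm_neon.h>
/* Widening add: 8-bit lanes are promoted to 16 bits, then added. */
uint16x8_t add_wide(uint8x8_t a, uint8x8_t b) {
  return vaddl_u8(a, b);
}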
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) {
- uint16x8_t __ret;
- __ret = __p0 + vmovl_u8(__p1);
- return __ret;
- }
- #else
- __ai uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __rev0 + __noswap_vmovl_u8(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) {
- uint64x2_t __ret;
- __ret = __p0 + vmovl_u32(__p1);
- return __ret;
- }
- #else
- __ai uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = __rev0 + __noswap_vmovl_u32(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) {
- uint32x4_t __ret;
- __ret = __p0 + vmovl_u16(__p1);
- return __ret;
- }
- #else
- __ai uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __rev0 + __noswap_vmovl_u16(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) {
- int16x8_t __ret;
- __ret = __p0 + vmovl_s8(__p1);
- return __ret;
- }
- #else
- __ai int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __rev0 + __noswap_vmovl_s8(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) {
- int64x2_t __ret;
- __ret = __p0 + vmovl_s32(__p1);
- return __ret;
- }
- #else
- __ai int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int64x2_t __ret;
- __ret = __rev0 + __noswap_vmovl_s32(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) {
- int32x4_t __ret;
- __ret = __p0 + vmovl_s16(__p1);
- return __ret;
- }
- #else
- __ai int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __rev0 + __noswap_vmovl_s16(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
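vaddw_* is the mixed-width variant: only the second operand is widened, then added into the already-wide first operand. Sketch (accumulate_bytes is an illustrative name):

#include <arm_neon.h>
/* Add a narrow vector into a wide accumulator: acc[i] += (uint16_t)x[i]. */
uint16x8_t accumulate_bytes(uint16x8_t acc, uint8x8_t x) {
  return vaddw_u8(acc, x);
}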
- #ifdef __LITTLE_ENDIAN__
- #define vget_lane_f16(__p0_254, __p1_254) __extension__ ({ \
- float16x4_t __s0_254 = __p0_254; \
- float16_t __ret_254; \
- float16x4_t __reint_254 = __s0_254; \
- int16_t __reint1_254 = vget_lane_s16(*(int16x4_t *) &__reint_254, __p1_254); \
- __ret_254 = *(float16_t *) &__reint1_254; \
- __ret_254; \
- })
- #else
- #define vget_lane_f16(__p0_255, __p1_255) __extension__ ({ \
- float16x4_t __s0_255 = __p0_255; \
- float16x4_t __rev0_255; __rev0_255 = __builtin_shufflevector(__s0_255, __s0_255, 3, 2, 1, 0); \
- float16_t __ret_255; \
- float16x4_t __reint_255 = __rev0_255; \
- int16_t __reint1_255 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_255, __p1_255); \
- __ret_255 = *(float16_t *) &__reint1_255; \
- __ret_255; \
- })
- #define __noswap_vget_lane_f16(__p0_256, __p1_256) __extension__ ({ \
- float16x4_t __s0_256 = __p0_256; \
- float16_t __ret_256; \
- float16x4_t __reint_256 = __s0_256; \
- int16_t __reint1_256 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_256, __p1_256); \
- __ret_256 = *(float16_t *) &__reint1_256; \
- __ret_256; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vgetq_lane_f16(__p0_257, __p1_257) __extension__ ({ \
- float16x8_t __s0_257 = __p0_257; \
- float16_t __ret_257; \
- float16x8_t __reint_257 = __s0_257; \
- int16_t __reint1_257 = vgetq_lane_s16(*(int16x8_t *) &__reint_257, __p1_257); \
- __ret_257 = *(float16_t *) &__reint1_257; \
- __ret_257; \
- })
- #else
- #define vgetq_lane_f16(__p0_258, __p1_258) __extension__ ({ \
- float16x8_t __s0_258 = __p0_258; \
- float16x8_t __rev0_258; __rev0_258 = __builtin_shufflevector(__s0_258, __s0_258, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16_t __ret_258; \
- float16x8_t __reint_258 = __rev0_258; \
- int16_t __reint1_258 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_258, __p1_258); \
- __ret_258 = *(float16_t *) &__reint1_258; \
- __ret_258; \
- })
- #define __noswap_vgetq_lane_f16(__p0_259, __p1_259) __extension__ ({ \
- float16x8_t __s0_259 = __p0_259; \
- float16_t __ret_259; \
- float16x8_t __reint_259 = __s0_259; \
- int16_t __reint1_259 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_259, __p1_259); \
- __ret_259 = *(float16_t *) &__reint1_259; \
- __ret_259; \
- })
- #endif
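The vget_lane_f16/vgetq_lane_f16 macros above extract a half-precision lane by reinterpreting the vector as int16x4_t/int16x8_t, doing the integer lane extract, and bit-casting the 16-bit value back to float16_t; the lane index must be a compile-time constant. A sketch, assuming the target provides __fp16/float16_t (third_lane is an illustrative name):

#include <arm_neon.h>
/* Extract lane 2 of a float16x4_t; the lane index must be a constant expression. */
float16_t third_lane(float16x4_t v) {
  return vget_lane_f16(v, 2);
}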
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
- uint16x8_t __ret;
- __ret = __p0 + vmull_u8(__p1, __p2);
- return __ret;
- }
- #else
- __ai uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __rev0 + __noswap_vmull_u8(__rev1, __rev2);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai uint16x8_t __noswap_vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
- uint16x8_t __ret;
- __ret = __p0 + __noswap_vmull_u8(__p1, __p2);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
- uint64x2_t __ret;
- __ret = __p0 + vmull_u32(__p1, __p2);
- return __ret;
- }
- #else
- __ai uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
- uint64x2_t __ret;
- __ret = __rev0 + __noswap_vmull_u32(__rev1, __rev2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai uint64x2_t __noswap_vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
- uint64x2_t __ret;
- __ret = __p0 + __noswap_vmull_u32(__p1, __p2);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
- uint32x4_t __ret;
- __ret = __p0 + vmull_u16(__p1, __p2);
- return __ret;
- }
- #else
- __ai uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __rev0 + __noswap_vmull_u16(__rev1, __rev2);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai uint32x4_t __noswap_vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
- uint32x4_t __ret;
- __ret = __p0 + __noswap_vmull_u16(__p1, __p2);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
- int16x8_t __ret;
- __ret = __p0 + vmull_s8(__p1, __p2);
- return __ret;
- }
- #else
- __ai int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __rev0 + __noswap_vmull_s8(__rev1, __rev2);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai int16x8_t __noswap_vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
- int16x8_t __ret;
- __ret = __p0 + __noswap_vmull_s8(__p1, __p2);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
- int64x2_t __ret;
- __ret = __p0 + vmull_s32(__p1, __p2);
- return __ret;
- }
- #else
- __ai int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
- int64x2_t __ret;
- __ret = __rev0 + __noswap_vmull_s32(__rev1, __rev2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai int64x2_t __noswap_vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
- int64x2_t __ret;
- __ret = __p0 + __noswap_vmull_s32(__p1, __p2);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
- int32x4_t __ret;
- __ret = __p0 + vmull_s16(__p1, __p2);
- return __ret;
- }
- #else
- __ai int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __rev0 + __noswap_vmull_s16(__rev1, __rev2);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai int32x4_t __noswap_vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
- int32x4_t __ret;
- __ret = __p0 + __noswap_vmull_s16(__p1, __p2);
- return __ret;
- }
- #endif
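vmlal_* above is multiply-accumulate-long: the two narrow operands are multiplied at double width with vmull_* and the product is added to the wide accumulator. Sketch (mac_row is an illustrative name):

#include <arm_neon.h>
/* acc[i] += (uint16_t)a[i] * (uint16_t)b[i], with the product formed at 16 bits. */
uint16x8_t mac_row(uint16x8_t acc, uint8x8_t a, uint8x8_t b) {
  return vmlal_u8(acc, a, b);
}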
- #ifdef __LITTLE_ENDIAN__
- #define vmlal_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint32x2_t __s1 = __p1; \
- uint32x2_t __s2 = __p2; \
- uint64x2_t __ret; \
- __ret = __s0 + vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vmlal_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint32x2_t __s1 = __p1; \
- uint32x2_t __s2 = __p2; \
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
- uint64x2_t __ret; \
- __ret = __rev0 + __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlal_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint16x4_t __s1 = __p1; \
- uint16x4_t __s2 = __p2; \
- uint32x4_t __ret; \
- __ret = __s0 + vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vmlal_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint16x4_t __s1 = __p1; \
- uint16x4_t __s2 = __p2; \
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- uint32x4_t __ret; \
- __ret = __rev0 + __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x2_t __s2 = __p2; \
- int64x2_t __ret; \
- __ret = __s0 + vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vmlal_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x2_t __s2 = __p2; \
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
- int64x2_t __ret; \
- __ret = __rev0 + __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x4_t __s2 = __p2; \
- int32x4_t __ret; \
- __ret = __s0 + vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vmlal_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x4_t __s2 = __p2; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = __rev0 + __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
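The vmlal_lane_* macros broadcast one lane of the third vector argument (via __builtin_shufflevector with a repeated index) before the widening multiply, so a single coefficient multiplies every lane; __p3 must be a compile-time constant. Sketch (mla_by_lane is an illustrative name):

#include <arm_neon.h>
/* acc[i] += (int32_t)a[i] * (int32_t)coeffs[3]; the lane index is a constant. */
int32x4_t mla_by_lane(int32x4_t acc, int16x4_t a, int16x4_t coeffs) {
  return vmlal_lane_s16(acc, a, coeffs, 3);
}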
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
- uint64x2_t __ret;
- __ret = __p0 + vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
- return __ret;
- }
- #else
- __ai uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = __rev0 + __noswap_vmull_u32(__rev1, (uint32x2_t) {__p2, __p2});
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai uint64x2_t __noswap_vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
- uint64x2_t __ret;
- __ret = __p0 + __noswap_vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
- uint32x4_t __ret;
- __ret = __p0 + vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
- return __ret;
- }
- #else
- __ai uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __rev0 + __noswap_vmull_u16(__rev1, (uint16x4_t) {__p2, __p2, __p2, __p2});
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai uint32x4_t __noswap_vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
- uint32x4_t __ret;
- __ret = __p0 + __noswap_vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
- int64x2_t __ret;
- __ret = __p0 + vmull_s32(__p1, (int32x2_t) {__p2, __p2});
- return __ret;
- }
- #else
- __ai int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int64x2_t __ret;
- __ret = __rev0 + __noswap_vmull_s32(__rev1, (int32x2_t) {__p2, __p2});
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai int64x2_t __noswap_vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
- int64x2_t __ret;
- __ret = __p0 + __noswap_vmull_s32(__p1, (int32x2_t) {__p2, __p2});
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
- int32x4_t __ret;
- __ret = __p0 + vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
- return __ret;
- }
- #else
- __ai int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __rev0 + __noswap_vmull_s16(__rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai int32x4_t __noswap_vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
- int32x4_t __ret;
- __ret = __p0 + __noswap_vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
- uint16x8_t __ret;
- __ret = __p0 - vmull_u8(__p1, __p2);
- return __ret;
- }
- #else
- __ai uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __rev0 - __noswap_vmull_u8(__rev1, __rev2);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai uint16x8_t __noswap_vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
- uint16x8_t __ret;
- __ret = __p0 - __noswap_vmull_u8(__p1, __p2);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
- uint64x2_t __ret;
- __ret = __p0 - vmull_u32(__p1, __p2);
- return __ret;
- }
- #else
- __ai uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
- uint64x2_t __ret;
- __ret = __rev0 - __noswap_vmull_u32(__rev1, __rev2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai uint64x2_t __noswap_vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
- uint64x2_t __ret;
- __ret = __p0 - __noswap_vmull_u32(__p1, __p2);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
- uint32x4_t __ret;
- __ret = __p0 - vmull_u16(__p1, __p2);
- return __ret;
- }
- #else
- __ai uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __rev0 - __noswap_vmull_u16(__rev1, __rev2);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai uint32x4_t __noswap_vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
- uint32x4_t __ret;
- __ret = __p0 - __noswap_vmull_u16(__p1, __p2);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
- int16x8_t __ret;
- __ret = __p0 - vmull_s8(__p1, __p2);
- return __ret;
- }
- #else
- __ai int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __rev0 - __noswap_vmull_s8(__rev1, __rev2);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai int16x8_t __noswap_vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
- int16x8_t __ret;
- __ret = __p0 - __noswap_vmull_s8(__p1, __p2);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
- int64x2_t __ret;
- __ret = __p0 - vmull_s32(__p1, __p2);
- return __ret;
- }
- #else
- __ai int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
- int64x2_t __ret;
- __ret = __rev0 - __noswap_vmull_s32(__rev1, __rev2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai int64x2_t __noswap_vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
- int64x2_t __ret;
- __ret = __p0 - __noswap_vmull_s32(__p1, __p2);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
- int32x4_t __ret;
- __ret = __p0 - vmull_s16(__p1, __p2);
- return __ret;
- }
- #else
- __ai int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __rev0 - __noswap_vmull_s16(__rev1, __rev2);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai int32x4_t __noswap_vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
- int32x4_t __ret;
- __ret = __p0 - __noswap_vmull_s16(__p1, __p2);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlsl_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint32x2_t __s1 = __p1; \
- uint32x2_t __s2 = __p2; \
- uint64x2_t __ret; \
- __ret = __s0 - vmull_u32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vmlsl_lane_u32(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint64x2_t __s0 = __p0; \
- uint32x2_t __s1 = __p1; \
- uint32x2_t __s2 = __p2; \
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
- uint64x2_t __ret; \
- __ret = __rev0 - __noswap_vmull_u32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlsl_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint16x4_t __s1 = __p1; \
- uint16x4_t __s2 = __p2; \
- uint32x4_t __ret; \
- __ret = __s0 - vmull_u16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vmlsl_lane_u16(__p0, __p1, __p2, __p3) __extension__ ({ \
- uint32x4_t __s0 = __p0; \
- uint16x4_t __s1 = __p1; \
- uint16x4_t __s2 = __p2; \
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- uint32x4_t __ret; \
- __ret = __rev0 - __noswap_vmull_u16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x2_t __s2 = __p2; \
- int64x2_t __ret; \
- __ret = __s0 - vmull_s32(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vmlsl_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
- int64x2_t __s0 = __p0; \
- int32x2_t __s1 = __p1; \
- int32x2_t __s2 = __p2; \
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
- int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
- int64x2_t __ret; \
- __ret = __rev0 - __noswap_vmull_s32(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x4_t __s2 = __p2; \
- int32x4_t __ret; \
- __ret = __s0 - vmull_s16(__s1, __builtin_shufflevector(__s2, __s2, __p3, __p3, __p3, __p3)); \
- __ret; \
- })
- #else
- #define vmlsl_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
- int32x4_t __s0 = __p0; \
- int16x4_t __s1 = __p1; \
- int16x4_t __s2 = __p2; \
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
- int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
- int32x4_t __ret; \
- __ret = __rev0 - __noswap_vmull_s16(__rev1, __builtin_shufflevector(__rev2, __rev2, __p3, __p3, __p3, __p3)); \
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
- __ret; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
- uint64x2_t __ret;
- __ret = __p0 - vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
- return __ret;
- }
- #else
- __ai uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint64x2_t __ret;
- __ret = __rev0 - __noswap_vmull_u32(__rev1, (uint32x2_t) {__p2, __p2});
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai uint64x2_t __noswap_vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
- uint64x2_t __ret;
- __ret = __p0 - __noswap_vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
- uint32x4_t __ret;
- __ret = __p0 - vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
- return __ret;
- }
- #else
- __ai uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __rev0 - __noswap_vmull_u16(__rev1, (uint16x4_t) {__p2, __p2, __p2, __p2});
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai uint32x4_t __noswap_vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
- uint32x4_t __ret;
- __ret = __p0 - __noswap_vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
- int64x2_t __ret;
- __ret = __p0 - vmull_s32(__p1, (int32x2_t) {__p2, __p2});
- return __ret;
- }
- #else
- __ai int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int64x2_t __ret;
- __ret = __rev0 - __noswap_vmull_s32(__rev1, (int32x2_t) {__p2, __p2});
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai int64x2_t __noswap_vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
- int64x2_t __ret;
- __ret = __p0 - __noswap_vmull_s32(__p1, (int32x2_t) {__p2, __p2});
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
- int32x4_t __ret;
- __ret = __p0 - vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
- return __ret;
- }
- #else
- __ai int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __rev0 - __noswap_vmull_s16(__rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai int32x4_t __noswap_vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
- int32x4_t __ret;
- __ret = __p0 - __noswap_vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vset_lane_f16(__p0_260, __p1_260, __p2_260) __extension__ ({ \
- float16_t __s0_260 = __p0_260; \
- float16x4_t __s1_260 = __p1_260; \
- float16x4_t __ret_260; \
- float16_t __reint_260 = __s0_260; \
- float16x4_t __reint1_260 = __s1_260; \
- int16x4_t __reint2_260 = vset_lane_s16(*(int16_t *) &__reint_260, *(int16x4_t *) &__reint1_260, __p2_260); \
- __ret_260 = *(float16x4_t *) &__reint2_260; \
- __ret_260; \
- })
- #else
- #define vset_lane_f16(__p0_261, __p1_261, __p2_261) __extension__ ({ \
- float16_t __s0_261 = __p0_261; \
- float16x4_t __s1_261 = __p1_261; \
- float16x4_t __rev1_261; __rev1_261 = __builtin_shufflevector(__s1_261, __s1_261, 3, 2, 1, 0); \
- float16x4_t __ret_261; \
- float16_t __reint_261 = __s0_261; \
- float16x4_t __reint1_261 = __rev1_261; \
- int16x4_t __reint2_261 = __noswap_vset_lane_s16(*(int16_t *) &__reint_261, *(int16x4_t *) &__reint1_261, __p2_261); \
- __ret_261 = *(float16x4_t *) &__reint2_261; \
- __ret_261 = __builtin_shufflevector(__ret_261, __ret_261, 3, 2, 1, 0); \
- __ret_261; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vsetq_lane_f16(__p0_262, __p1_262, __p2_262) __extension__ ({ \
- float16_t __s0_262 = __p0_262; \
- float16x8_t __s1_262 = __p1_262; \
- float16x8_t __ret_262; \
- float16_t __reint_262 = __s0_262; \
- float16x8_t __reint1_262 = __s1_262; \
- int16x8_t __reint2_262 = vsetq_lane_s16(*(int16_t *) &__reint_262, *(int16x8_t *) &__reint1_262, __p2_262); \
- __ret_262 = *(float16x8_t *) &__reint2_262; \
- __ret_262; \
- })
- #else
- #define vsetq_lane_f16(__p0_263, __p1_263, __p2_263) __extension__ ({ \
- float16_t __s0_263 = __p0_263; \
- float16x8_t __s1_263 = __p1_263; \
- float16x8_t __rev1_263; __rev1_263 = __builtin_shufflevector(__s1_263, __s1_263, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16x8_t __ret_263; \
- float16_t __reint_263 = __s0_263; \
- float16x8_t __reint1_263 = __rev1_263; \
- int16x8_t __reint2_263 = __noswap_vsetq_lane_s16(*(int16_t *) &__reint_263, *(int16x8_t *) &__reint1_263, __p2_263); \
- __ret_263 = *(float16x8_t *) &__reint2_263; \
- __ret_263 = __builtin_shufflevector(__ret_263, __ret_263, 7, 6, 5, 4, 3, 2, 1, 0); \
- __ret_263; \
- })
- #endif
- #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__)
- #ifdef __LITTLE_ENDIAN__
- #define vmulh_lane_f16(__p0_264, __p1_264, __p2_264) __extension__ ({ \
- float16_t __s0_264 = __p0_264; \
- float16x4_t __s1_264 = __p1_264; \
- float16_t __ret_264; \
- __ret_264 = __s0_264 * vget_lane_f16(__s1_264, __p2_264); \
- __ret_264; \
- })
- #else
- #define vmulh_lane_f16(__p0_265, __p1_265, __p2_265) __extension__ ({ \
- float16_t __s0_265 = __p0_265; \
- float16x4_t __s1_265 = __p1_265; \
- float16x4_t __rev1_265; __rev1_265 = __builtin_shufflevector(__s1_265, __s1_265, 3, 2, 1, 0); \
- float16_t __ret_265; \
- __ret_265 = __s0_265 * __noswap_vget_lane_f16(__rev1_265, __p2_265); \
- __ret_265; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmulh_laneq_f16(__p0_266, __p1_266, __p2_266) __extension__ ({ \
- float16_t __s0_266 = __p0_266; \
- float16x8_t __s1_266 = __p1_266; \
- float16_t __ret_266; \
- __ret_266 = __s0_266 * vgetq_lane_f16(__s1_266, __p2_266); \
- __ret_266; \
- })
- #else
- #define vmulh_laneq_f16(__p0_267, __p1_267, __p2_267) __extension__ ({ \
- float16_t __s0_267 = __p0_267; \
- float16x8_t __s1_267 = __p1_267; \
- float16x8_t __rev1_267; __rev1_267 = __builtin_shufflevector(__s1_267, __s1_267, 7, 6, 5, 4, 3, 2, 1, 0); \
- float16_t __ret_267; \
- __ret_267 = __s0_267 * __noswap_vgetq_lane_f16(__rev1_267, __p2_267); \
- __ret_267; \
- })
- #endif
- #endif
- #if defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)
- #ifdef __LITTLE_ENDIAN__
- __ai int32_t vqrdmlahs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
- int32_t __ret;
- __ret = vqadds_s32(__p0, vqrdmulhs_s32(__p1, __p2));
- return __ret;
- }
- #else
- __ai int32_t vqrdmlahs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
- int32_t __ret;
- __ret = __noswap_vqadds_s32(__p0, __noswap_vqrdmulhs_s32(__p1, __p2));
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16_t vqrdmlahh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
- int16_t __ret;
- __ret = vqaddh_s16(__p0, vqrdmulhh_s16(__p1, __p2));
- return __ret;
- }
- #else
- __ai int16_t vqrdmlahh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
- int16_t __ret;
- __ret = __noswap_vqaddh_s16(__p0, __noswap_vqrdmulhh_s16(__p1, __p2));
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrdmlahs_lane_s32(__p0_268, __p1_268, __p2_268, __p3_268) __extension__ ({ \
- int32_t __s0_268 = __p0_268; \
- int32_t __s1_268 = __p1_268; \
- int32x2_t __s2_268 = __p2_268; \
- int32_t __ret_268; \
- __ret_268 = vqadds_s32(__s0_268, vqrdmulhs_s32(__s1_268, vget_lane_s32(__s2_268, __p3_268))); \
- __ret_268; \
- })
- #else
- #define vqrdmlahs_lane_s32(__p0_269, __p1_269, __p2_269, __p3_269) __extension__ ({ \
- int32_t __s0_269 = __p0_269; \
- int32_t __s1_269 = __p1_269; \
- int32x2_t __s2_269 = __p2_269; \
- int32x2_t __rev2_269; __rev2_269 = __builtin_shufflevector(__s2_269, __s2_269, 1, 0); \
- int32_t __ret_269; \
- __ret_269 = __noswap_vqadds_s32(__s0_269, __noswap_vqrdmulhs_s32(__s1_269, __noswap_vget_lane_s32(__rev2_269, __p3_269))); \
- __ret_269; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrdmlahh_lane_s16(__p0_270, __p1_270, __p2_270, __p3_270) __extension__ ({ \
- int16_t __s0_270 = __p0_270; \
- int16_t __s1_270 = __p1_270; \
- int16x4_t __s2_270 = __p2_270; \
- int16_t __ret_270; \
- __ret_270 = vqaddh_s16(__s0_270, vqrdmulhh_s16(__s1_270, vget_lane_s16(__s2_270, __p3_270))); \
- __ret_270; \
- })
- #else
- #define vqrdmlahh_lane_s16(__p0_271, __p1_271, __p2_271, __p3_271) __extension__ ({ \
- int16_t __s0_271 = __p0_271; \
- int16_t __s1_271 = __p1_271; \
- int16x4_t __s2_271 = __p2_271; \
- int16x4_t __rev2_271; __rev2_271 = __builtin_shufflevector(__s2_271, __s2_271, 3, 2, 1, 0); \
- int16_t __ret_271; \
- __ret_271 = __noswap_vqaddh_s16(__s0_271, __noswap_vqrdmulhh_s16(__s1_271, __noswap_vget_lane_s16(__rev2_271, __p3_271))); \
- __ret_271; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrdmlahs_laneq_s32(__p0_272, __p1_272, __p2_272, __p3_272) __extension__ ({ \
- int32_t __s0_272 = __p0_272; \
- int32_t __s1_272 = __p1_272; \
- int32x4_t __s2_272 = __p2_272; \
- int32_t __ret_272; \
- __ret_272 = vqadds_s32(__s0_272, vqrdmulhs_s32(__s1_272, vgetq_lane_s32(__s2_272, __p3_272))); \
- __ret_272; \
- })
- #else
- #define vqrdmlahs_laneq_s32(__p0_273, __p1_273, __p2_273, __p3_273) __extension__ ({ \
- int32_t __s0_273 = __p0_273; \
- int32_t __s1_273 = __p1_273; \
- int32x4_t __s2_273 = __p2_273; \
- int32x4_t __rev2_273; __rev2_273 = __builtin_shufflevector(__s2_273, __s2_273, 3, 2, 1, 0); \
- int32_t __ret_273; \
- __ret_273 = __noswap_vqadds_s32(__s0_273, __noswap_vqrdmulhs_s32(__s1_273, __noswap_vgetq_lane_s32(__rev2_273, __p3_273))); \
- __ret_273; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrdmlahh_laneq_s16(__p0_274, __p1_274, __p2_274, __p3_274) __extension__ ({ \
- int16_t __s0_274 = __p0_274; \
- int16_t __s1_274 = __p1_274; \
- int16x8_t __s2_274 = __p2_274; \
- int16_t __ret_274; \
- __ret_274 = vqaddh_s16(__s0_274, vqrdmulhh_s16(__s1_274, vgetq_lane_s16(__s2_274, __p3_274))); \
- __ret_274; \
- })
- #else
- #define vqrdmlahh_laneq_s16(__p0_275, __p1_275, __p2_275, __p3_275) __extension__ ({ \
- int16_t __s0_275 = __p0_275; \
- int16_t __s1_275 = __p1_275; \
- int16x8_t __s2_275 = __p2_275; \
- int16x8_t __rev2_275; __rev2_275 = __builtin_shufflevector(__s2_275, __s2_275, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16_t __ret_275; \
- __ret_275 = __noswap_vqaddh_s16(__s0_275, __noswap_vqrdmulhh_s16(__s1_275, __noswap_vgetq_lane_s16(__rev2_275, __p3_275))); \
- __ret_275; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32_t vqrdmlshs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
- int32_t __ret;
- __ret = vqsubs_s32(__p0, vqrdmulhs_s32(__p1, __p2));
- return __ret;
- }
- #else
- __ai int32_t vqrdmlshs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
- int32_t __ret;
- __ret = __noswap_vqsubs_s32(__p0, __noswap_vqrdmulhs_s32(__p1, __p2));
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16_t vqrdmlshh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
- int16_t __ret;
- __ret = vqsubh_s16(__p0, vqrdmulhh_s16(__p1, __p2));
- return __ret;
- }
- #else
- __ai int16_t vqrdmlshh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
- int16_t __ret;
- __ret = __noswap_vqsubh_s16(__p0, __noswap_vqrdmulhh_s16(__p1, __p2));
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrdmlshs_lane_s32(__p0_276, __p1_276, __p2_276, __p3_276) __extension__ ({ \
- int32_t __s0_276 = __p0_276; \
- int32_t __s1_276 = __p1_276; \
- int32x2_t __s2_276 = __p2_276; \
- int32_t __ret_276; \
- __ret_276 = vqsubs_s32(__s0_276, vqrdmulhs_s32(__s1_276, vget_lane_s32(__s2_276, __p3_276))); \
- __ret_276; \
- })
- #else
- #define vqrdmlshs_lane_s32(__p0_277, __p1_277, __p2_277, __p3_277) __extension__ ({ \
- int32_t __s0_277 = __p0_277; \
- int32_t __s1_277 = __p1_277; \
- int32x2_t __s2_277 = __p2_277; \
- int32x2_t __rev2_277; __rev2_277 = __builtin_shufflevector(__s2_277, __s2_277, 1, 0); \
- int32_t __ret_277; \
- __ret_277 = __noswap_vqsubs_s32(__s0_277, __noswap_vqrdmulhs_s32(__s1_277, __noswap_vget_lane_s32(__rev2_277, __p3_277))); \
- __ret_277; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrdmlshh_lane_s16(__p0_278, __p1_278, __p2_278, __p3_278) __extension__ ({ \
- int16_t __s0_278 = __p0_278; \
- int16_t __s1_278 = __p1_278; \
- int16x4_t __s2_278 = __p2_278; \
- int16_t __ret_278; \
- __ret_278 = vqsubh_s16(__s0_278, vqrdmulhh_s16(__s1_278, vget_lane_s16(__s2_278, __p3_278))); \
- __ret_278; \
- })
- #else
- #define vqrdmlshh_lane_s16(__p0_279, __p1_279, __p2_279, __p3_279) __extension__ ({ \
- int16_t __s0_279 = __p0_279; \
- int16_t __s1_279 = __p1_279; \
- int16x4_t __s2_279 = __p2_279; \
- int16x4_t __rev2_279; __rev2_279 = __builtin_shufflevector(__s2_279, __s2_279, 3, 2, 1, 0); \
- int16_t __ret_279; \
- __ret_279 = __noswap_vqsubh_s16(__s0_279, __noswap_vqrdmulhh_s16(__s1_279, __noswap_vget_lane_s16(__rev2_279, __p3_279))); \
- __ret_279; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrdmlshs_laneq_s32(__p0_280, __p1_280, __p2_280, __p3_280) __extension__ ({ \
- int32_t __s0_280 = __p0_280; \
- int32_t __s1_280 = __p1_280; \
- int32x4_t __s2_280 = __p2_280; \
- int32_t __ret_280; \
- __ret_280 = vqsubs_s32(__s0_280, vqrdmulhs_s32(__s1_280, vgetq_lane_s32(__s2_280, __p3_280))); \
- __ret_280; \
- })
- #else
- #define vqrdmlshs_laneq_s32(__p0_281, __p1_281, __p2_281, __p3_281) __extension__ ({ \
- int32_t __s0_281 = __p0_281; \
- int32_t __s1_281 = __p1_281; \
- int32x4_t __s2_281 = __p2_281; \
- int32x4_t __rev2_281; __rev2_281 = __builtin_shufflevector(__s2_281, __s2_281, 3, 2, 1, 0); \
- int32_t __ret_281; \
- __ret_281 = __noswap_vqsubs_s32(__s0_281, __noswap_vqrdmulhs_s32(__s1_281, __noswap_vgetq_lane_s32(__rev2_281, __p3_281))); \
- __ret_281; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vqrdmlshh_laneq_s16(__p0_282, __p1_282, __p2_282, __p3_282) __extension__ ({ \
- int16_t __s0_282 = __p0_282; \
- int16_t __s1_282 = __p1_282; \
- int16x8_t __s2_282 = __p2_282; \
- int16_t __ret_282; \
- __ret_282 = vqsubh_s16(__s0_282, vqrdmulhh_s16(__s1_282, vgetq_lane_s16(__s2_282, __p3_282))); \
- __ret_282; \
- })
- #else
- #define vqrdmlshh_laneq_s16(__p0_283, __p1_283, __p2_283, __p3_283) __extension__ ({ \
- int16_t __s0_283 = __p0_283; \
- int16_t __s1_283 = __p1_283; \
- int16x8_t __s2_283 = __p2_283; \
- int16x8_t __rev2_283; __rev2_283 = __builtin_shufflevector(__s2_283, __s2_283, 7, 6, 5, 4, 3, 2, 1, 0); \
- int16_t __ret_283; \
- __ret_283 = __noswap_vqsubh_s16(__s0_283, __noswap_vqrdmulhh_s16(__s1_283, __noswap_vgetq_lane_s16(__rev2_283, __p3_283))); \
- __ret_283; \
- })
- #endif
- #endif
- #if defined(__aarch64__)
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint16x8_t __ret;
- __ret = vabdl_u8(vget_high_u8(__p0), vget_high_u8(__p1));
- return __ret;
- }
- #else
- __ai uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __noswap_vabdl_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1));
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint64x2_t __ret;
- __ret = vabdl_u32(vget_high_u32(__p0), vget_high_u32(__p1));
- return __ret;
- }
- #else
- __ai uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint64x2_t __ret;
- __ret = __noswap_vabdl_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1));
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint32x4_t __ret;
- __ret = vabdl_u16(vget_high_u16(__p0), vget_high_u16(__p1));
- return __ret;
- }
- #else
- __ai uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __noswap_vabdl_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1));
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) {
- int16x8_t __ret;
- __ret = vabdl_s8(vget_high_s8(__p0), vget_high_s8(__p1));
- return __ret;
- }
- #else
- __ai int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __noswap_vabdl_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1));
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) {
- int64x2_t __ret;
- __ret = vabdl_s32(vget_high_s32(__p0), vget_high_s32(__p1));
- return __ret;
- }
- #else
- __ai int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int64x2_t __ret;
- __ret = __noswap_vabdl_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) {
- int32x4_t __ret;
- __ret = vabdl_s16(vget_high_s16(__p0), vget_high_s16(__p1));
- return __ret;
- }
- #else
- __ai int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __noswap_vabdl_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint16x8_t __ret;
- __ret = vmovl_high_u8(__p0) + vmovl_high_u8(__p1);
- return __ret;
- }
- #else
- __ai uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
- uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __noswap_vmovl_high_u8(__rev0) + __noswap_vmovl_high_u8(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint64x2_t __ret;
- __ret = vmovl_high_u32(__p0) + vmovl_high_u32(__p1);
- return __ret;
- }
- #else
- __ai uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint64x2_t __ret;
- __ret = __noswap_vmovl_high_u32(__rev0) + __noswap_vmovl_high_u32(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint32x4_t __ret;
- __ret = vmovl_high_u16(__p0) + vmovl_high_u16(__p1);
- return __ret;
- }
- #else
- __ai uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __noswap_vmovl_high_u16(__rev0) + __noswap_vmovl_high_u16(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) {
- int16x8_t __ret;
- __ret = vmovl_high_s8(__p0) + vmovl_high_s8(__p1);
- return __ret;
- }
- #else
- __ai int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) {
- int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __noswap_vmovl_high_s8(__rev0) + __noswap_vmovl_high_s8(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) {
- int64x2_t __ret;
- __ret = vmovl_high_s32(__p0) + vmovl_high_s32(__p1);
- return __ret;
- }
- #else
- __ai int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int64x2_t __ret;
- __ret = __noswap_vmovl_high_s32(__rev0) + __noswap_vmovl_high_s32(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) {
- int32x4_t __ret;
- __ret = vmovl_high_s16(__p0) + vmovl_high_s16(__p1);
- return __ret;
- }
- #else
- __ai int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __noswap_vmovl_high_s16(__rev0) + __noswap_vmovl_high_s16(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
- uint16x8_t __ret;
- __ret = __p0 + vmovl_high_u8(__p1);
- return __ret;
- }
- #else
- __ai uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __rev0 + __noswap_vmovl_high_u8(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
- uint64x2_t __ret;
- __ret = __p0 + vmovl_high_u32(__p1);
- return __ret;
- }
- #else
- __ai uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint64x2_t __ret;
- __ret = __rev0 + __noswap_vmovl_high_u32(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
- uint32x4_t __ret;
- __ret = __p0 + vmovl_high_u16(__p1);
- return __ret;
- }
- #else
- __ai uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __rev0 + __noswap_vmovl_high_u16(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) {
- int16x8_t __ret;
- __ret = __p0 + vmovl_high_s8(__p1);
- return __ret;
- }
- #else
- __ai int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __rev0 + __noswap_vmovl_high_s8(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) {
- int64x2_t __ret;
- __ret = __p0 + vmovl_high_s32(__p1);
- return __ret;
- }
- #else
- __ai int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int64x2_t __ret;
- __ret = __rev0 + __noswap_vmovl_high_s32(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) {
- int32x4_t __ret;
- __ret = __p0 + vmovl_high_s16(__p1);
- return __ret;
- }
- #else
- __ai int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __rev0 + __noswap_vmovl_high_s16(__rev1);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopyq_lane_p64(__p0_284, __p1_284, __p2_284, __p3_284) __extension__ ({ \
- poly64x2_t __s0_284 = __p0_284; \
- poly64x1_t __s2_284 = __p2_284; \
- poly64x2_t __ret_284; \
- __ret_284 = vsetq_lane_p64(vget_lane_p64(__s2_284, __p3_284), __s0_284, __p1_284); \
- __ret_284; \
- })
- #else
- #define vcopyq_lane_p64(__p0_285, __p1_285, __p2_285, __p3_285) __extension__ ({ \
- poly64x2_t __s0_285 = __p0_285; \
- poly64x1_t __s2_285 = __p2_285; \
- poly64x2_t __rev0_285; __rev0_285 = __builtin_shufflevector(__s0_285, __s0_285, 1, 0); \
- poly64x2_t __ret_285; \
- __ret_285 = __noswap_vsetq_lane_p64(__noswap_vget_lane_p64(__s2_285, __p3_285), __rev0_285, __p1_285); \
- __ret_285 = __builtin_shufflevector(__ret_285, __ret_285, 1, 0); \
- __ret_285; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopyq_lane_f64(__p0_286, __p1_286, __p2_286, __p3_286) __extension__ ({ \
- float64x2_t __s0_286 = __p0_286; \
- float64x1_t __s2_286 = __p2_286; \
- float64x2_t __ret_286; \
- __ret_286 = vsetq_lane_f64(vget_lane_f64(__s2_286, __p3_286), __s0_286, __p1_286); \
- __ret_286; \
- })
- #else
- #define vcopyq_lane_f64(__p0_287, __p1_287, __p2_287, __p3_287) __extension__ ({ \
- float64x2_t __s0_287 = __p0_287; \
- float64x1_t __s2_287 = __p2_287; \
- float64x2_t __rev0_287; __rev0_287 = __builtin_shufflevector(__s0_287, __s0_287, 1, 0); \
- float64x2_t __ret_287; \
- __ret_287 = __noswap_vsetq_lane_f64(__noswap_vget_lane_f64(__s2_287, __p3_287), __rev0_287, __p1_287); \
- __ret_287 = __builtin_shufflevector(__ret_287, __ret_287, 1, 0); \
- __ret_287; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopy_lane_p64(__p0_288, __p1_288, __p2_288, __p3_288) __extension__ ({ \
- poly64x1_t __s0_288 = __p0_288; \
- poly64x1_t __s2_288 = __p2_288; \
- poly64x1_t __ret_288; \
- __ret_288 = vset_lane_p64(vget_lane_p64(__s2_288, __p3_288), __s0_288, __p1_288); \
- __ret_288; \
- })
- #else
- #define vcopy_lane_p64(__p0_289, __p1_289, __p2_289, __p3_289) __extension__ ({ \
- poly64x1_t __s0_289 = __p0_289; \
- poly64x1_t __s2_289 = __p2_289; \
- poly64x1_t __ret_289; \
- __ret_289 = __noswap_vset_lane_p64(__noswap_vget_lane_p64(__s2_289, __p3_289), __s0_289, __p1_289); \
- __ret_289; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopy_lane_f64(__p0_290, __p1_290, __p2_290, __p3_290) __extension__ ({ \
- float64x1_t __s0_290 = __p0_290; \
- float64x1_t __s2_290 = __p2_290; \
- float64x1_t __ret_290; \
- __ret_290 = vset_lane_f64(vget_lane_f64(__s2_290, __p3_290), __s0_290, __p1_290); \
- __ret_290; \
- })
- #else
- #define vcopy_lane_f64(__p0_291, __p1_291, __p2_291, __p3_291) __extension__ ({ \
- float64x1_t __s0_291 = __p0_291; \
- float64x1_t __s2_291 = __p2_291; \
- float64x1_t __ret_291; \
- __ret_291 = __noswap_vset_lane_f64(__noswap_vget_lane_f64(__s2_291, __p3_291), __s0_291, __p1_291); \
- __ret_291; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopyq_laneq_p64(__p0_292, __p1_292, __p2_292, __p3_292) __extension__ ({ \
- poly64x2_t __s0_292 = __p0_292; \
- poly64x2_t __s2_292 = __p2_292; \
- poly64x2_t __ret_292; \
- __ret_292 = vsetq_lane_p64(vgetq_lane_p64(__s2_292, __p3_292), __s0_292, __p1_292); \
- __ret_292; \
- })
- #else
- #define vcopyq_laneq_p64(__p0_293, __p1_293, __p2_293, __p3_293) __extension__ ({ \
- poly64x2_t __s0_293 = __p0_293; \
- poly64x2_t __s2_293 = __p2_293; \
- poly64x2_t __rev0_293; __rev0_293 = __builtin_shufflevector(__s0_293, __s0_293, 1, 0); \
- poly64x2_t __rev2_293; __rev2_293 = __builtin_shufflevector(__s2_293, __s2_293, 1, 0); \
- poly64x2_t __ret_293; \
- __ret_293 = __noswap_vsetq_lane_p64(__noswap_vgetq_lane_p64(__rev2_293, __p3_293), __rev0_293, __p1_293); \
- __ret_293 = __builtin_shufflevector(__ret_293, __ret_293, 1, 0); \
- __ret_293; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopyq_laneq_f64(__p0_294, __p1_294, __p2_294, __p3_294) __extension__ ({ \
- float64x2_t __s0_294 = __p0_294; \
- float64x2_t __s2_294 = __p2_294; \
- float64x2_t __ret_294; \
- __ret_294 = vsetq_lane_f64(vgetq_lane_f64(__s2_294, __p3_294), __s0_294, __p1_294); \
- __ret_294; \
- })
- #else
- #define vcopyq_laneq_f64(__p0_295, __p1_295, __p2_295, __p3_295) __extension__ ({ \
- float64x2_t __s0_295 = __p0_295; \
- float64x2_t __s2_295 = __p2_295; \
- float64x2_t __rev0_295; __rev0_295 = __builtin_shufflevector(__s0_295, __s0_295, 1, 0); \
- float64x2_t __rev2_295; __rev2_295 = __builtin_shufflevector(__s2_295, __s2_295, 1, 0); \
- float64x2_t __ret_295; \
- __ret_295 = __noswap_vsetq_lane_f64(__noswap_vgetq_lane_f64(__rev2_295, __p3_295), __rev0_295, __p1_295); \
- __ret_295 = __builtin_shufflevector(__ret_295, __ret_295, 1, 0); \
- __ret_295; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopy_laneq_p64(__p0_296, __p1_296, __p2_296, __p3_296) __extension__ ({ \
- poly64x1_t __s0_296 = __p0_296; \
- poly64x2_t __s2_296 = __p2_296; \
- poly64x1_t __ret_296; \
- __ret_296 = vset_lane_p64(vgetq_lane_p64(__s2_296, __p3_296), __s0_296, __p1_296); \
- __ret_296; \
- })
- #else
- #define vcopy_laneq_p64(__p0_297, __p1_297, __p2_297, __p3_297) __extension__ ({ \
- poly64x1_t __s0_297 = __p0_297; \
- poly64x2_t __s2_297 = __p2_297; \
- poly64x2_t __rev2_297; __rev2_297 = __builtin_shufflevector(__s2_297, __s2_297, 1, 0); \
- poly64x1_t __ret_297; \
- __ret_297 = __noswap_vset_lane_p64(__noswap_vgetq_lane_p64(__rev2_297, __p3_297), __s0_297, __p1_297); \
- __ret_297; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vcopy_laneq_f64(__p0_298, __p1_298, __p2_298, __p3_298) __extension__ ({ \
- float64x1_t __s0_298 = __p0_298; \
- float64x2_t __s2_298 = __p2_298; \
- float64x1_t __ret_298; \
- __ret_298 = vset_lane_f64(vgetq_lane_f64(__s2_298, __p3_298), __s0_298, __p1_298); \
- __ret_298; \
- })
- #else
- #define vcopy_laneq_f64(__p0_299, __p1_299, __p2_299, __p3_299) __extension__ ({ \
- float64x1_t __s0_299 = __p0_299; \
- float64x2_t __s2_299 = __p2_299; \
- float64x2_t __rev2_299; __rev2_299 = __builtin_shufflevector(__s2_299, __s2_299, 1, 0); \
- float64x1_t __ret_299; \
- __ret_299 = __noswap_vset_lane_f64(__noswap_vgetq_lane_f64(__rev2_299, __p3_299), __s0_299, __p1_299); \
- __ret_299; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
- uint16x8_t __ret;
- __ret = vmlal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2));
- return __ret;
- }
- #else
- __ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __noswap_vmlal_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
- uint64x2_t __ret;
- __ret = vmlal_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2));
- return __ret;
- }
- #else
- __ai uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- uint64x2_t __ret;
- __ret = __noswap_vmlal_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
- uint32x4_t __ret;
- __ret = vmlal_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2));
- return __ret;
- }
- #else
- __ai uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __noswap_vmlal_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
- int16x8_t __ret;
- __ret = vmlal_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2));
- return __ret;
- }
- #else
- __ai int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __noswap_vmlal_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
- int64x2_t __ret;
- __ret = vmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
- return __ret;
- }
- #else
- __ai int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- int64x2_t __ret;
- __ret = __noswap_vmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
- int32x4_t __ret;
- __ret = vmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
- return __ret;
- }
- #else
- __ai int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __noswap_vmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
- uint64x2_t __ret;
- __ret = vmlal_n_u32(__p0, vget_high_u32(__p1), __p2);
- return __ret;
- }
- #else
- __ai uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint64x2_t __ret;
- __ret = __noswap_vmlal_n_u32(__rev0, __noswap_vget_high_u32(__rev1), __p2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
- uint32x4_t __ret;
- __ret = vmlal_n_u16(__p0, vget_high_u16(__p1), __p2);
- return __ret;
- }
- #else
- __ai uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __noswap_vmlal_n_u16(__rev0, __noswap_vget_high_u16(__rev1), __p2);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
- int64x2_t __ret;
- __ret = vmlal_n_s32(__p0, vget_high_s32(__p1), __p2);
- return __ret;
- }
- #else
- __ai int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int64x2_t __ret;
- __ret = __noswap_vmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
- int32x4_t __ret;
- __ret = vmlal_n_s16(__p0, vget_high_s16(__p1), __p2);
- return __ret;
- }
- #else
- __ai int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __noswap_vmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
- uint16x8_t __ret;
- __ret = vmlsl_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2));
- return __ret;
- }
- #else
- __ai uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __noswap_vmlsl_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
- uint64x2_t __ret;
- __ret = vmlsl_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2));
- return __ret;
- }
- #else
- __ai uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- uint64x2_t __ret;
- __ret = __noswap_vmlsl_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
- uint32x4_t __ret;
- __ret = vmlsl_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2));
- return __ret;
- }
- #else
- __ai uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __noswap_vmlsl_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
- int16x8_t __ret;
- __ret = vmlsl_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2));
- return __ret;
- }
- #else
- __ai int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __noswap_vmlsl_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
- int64x2_t __ret;
- __ret = vmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
- return __ret;
- }
- #else
- __ai int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- int64x2_t __ret;
- __ret = __noswap_vmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
- int32x4_t __ret;
- __ret = vmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
- return __ret;
- }
- #else
- __ai int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __noswap_vmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
- uint64x2_t __ret;
- __ret = vmlsl_n_u32(__p0, vget_high_u32(__p1), __p2);
- return __ret;
- }
- #else
- __ai uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint64x2_t __ret;
- __ret = __noswap_vmlsl_n_u32(__rev0, __noswap_vget_high_u32(__rev1), __p2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
- uint32x4_t __ret;
- __ret = vmlsl_n_u16(__p0, vget_high_u16(__p1), __p2);
- return __ret;
- }
- #else
- __ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __noswap_vmlsl_n_u16(__rev0, __noswap_vget_high_u16(__rev1), __p2);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
- int64x2_t __ret;
- __ret = vmlsl_n_s32(__p0, vget_high_s32(__p1), __p2);
- return __ret;
- }
- #else
- __ai int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int64x2_t __ret;
- __ret = __noswap_vmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
- int32x4_t __ret;
- __ret = vmlsl_n_s16(__p0, vget_high_s16(__p1), __p2);
- return __ret;
- }
- #else
- __ai int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __noswap_vmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmulx_lane_f64(__p0_300, __p1_300, __p2_300) __extension__ ({ \
- float64x1_t __s0_300 = __p0_300; \
- float64x1_t __s1_300 = __p1_300; \
- float64x1_t __ret_300; \
- float64_t __x_300 = vget_lane_f64(__s0_300, 0); \
- float64_t __y_300 = vget_lane_f64(__s1_300, __p2_300); \
- float64_t __z_300 = vmulxd_f64(__x_300, __y_300); \
- __ret_300 = vset_lane_f64(__z_300, __s0_300, __p2_300); \
- __ret_300; \
- })
- #else
- #define vmulx_lane_f64(__p0_301, __p1_301, __p2_301) __extension__ ({ \
- float64x1_t __s0_301 = __p0_301; \
- float64x1_t __s1_301 = __p1_301; \
- float64x1_t __ret_301; \
- float64_t __x_301 = __noswap_vget_lane_f64(__s0_301, 0); \
- float64_t __y_301 = __noswap_vget_lane_f64(__s1_301, __p2_301); \
- float64_t __z_301 = __noswap_vmulxd_f64(__x_301, __y_301); \
- __ret_301 = __noswap_vset_lane_f64(__z_301, __s0_301, __p2_301); \
- __ret_301; \
- })
- #endif
- #ifdef __LITTLE_ENDIAN__
- #define vmulx_laneq_f64(__p0_302, __p1_302, __p2_302) __extension__ ({ \
- float64x1_t __s0_302 = __p0_302; \
- float64x2_t __s1_302 = __p1_302; \
- float64x1_t __ret_302; \
- float64_t __x_302 = vget_lane_f64(__s0_302, 0); \
- float64_t __y_302 = vgetq_lane_f64(__s1_302, __p2_302); \
- float64_t __z_302 = vmulxd_f64(__x_302, __y_302); \
- __ret_302 = vset_lane_f64(__z_302, __s0_302, 0); \
- __ret_302; \
- })
- #else
- #define vmulx_laneq_f64(__p0_303, __p1_303, __p2_303) __extension__ ({ \
- float64x1_t __s0_303 = __p0_303; \
- float64x2_t __s1_303 = __p1_303; \
- float64x2_t __rev1_303; __rev1_303 = __builtin_shufflevector(__s1_303, __s1_303, 1, 0); \
- float64x1_t __ret_303; \
- float64_t __x_303 = __noswap_vget_lane_f64(__s0_303, 0); \
- float64_t __y_303 = __noswap_vgetq_lane_f64(__rev1_303, __p2_303); \
- float64_t __z_303 = __noswap_vmulxd_f64(__x_303, __y_303); \
- __ret_303 = __noswap_vset_lane_f64(__z_303, __s0_303, 0); \
- __ret_303; \
- })
- #endif
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
- uint16x8_t __ret;
- __ret = __p0 + vabdl_u8(__p1, __p2);
- return __ret;
- }
- #else
- __ai uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __rev0 + __noswap_vabdl_u8(__rev1, __rev2);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai uint16x8_t __noswap_vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
- uint16x8_t __ret;
- __ret = __p0 + __noswap_vabdl_u8(__p1, __p2);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
- uint64x2_t __ret;
- __ret = __p0 + vabdl_u32(__p1, __p2);
- return __ret;
- }
- #else
- __ai uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
- uint64x2_t __ret;
- __ret = __rev0 + __noswap_vabdl_u32(__rev1, __rev2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai uint64x2_t __noswap_vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
- uint64x2_t __ret;
- __ret = __p0 + __noswap_vabdl_u32(__p1, __p2);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
- uint32x4_t __ret;
- __ret = __p0 + vabdl_u16(__p1, __p2);
- return __ret;
- }
- #else
- __ai uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __rev0 + __noswap_vabdl_u16(__rev1, __rev2);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai uint32x4_t __noswap_vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
- uint32x4_t __ret;
- __ret = __p0 + __noswap_vabdl_u16(__p1, __p2);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
- int16x8_t __ret;
- __ret = __p0 + vabdl_s8(__p1, __p2);
- return __ret;
- }
- #else
- __ai int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __rev0 + __noswap_vabdl_s8(__rev1, __rev2);
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- __ai int16x8_t __noswap_vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
- int16x8_t __ret;
- __ret = __p0 + __noswap_vabdl_s8(__p1, __p2);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
- int64x2_t __ret;
- __ret = __p0 + vabdl_s32(__p1, __p2);
- return __ret;
- }
- #else
- __ai int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
- int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
- int64x2_t __ret;
- __ret = __rev0 + __noswap_vabdl_s32(__rev1, __rev2);
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- __ai int64x2_t __noswap_vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
- int64x2_t __ret;
- __ret = __p0 + __noswap_vabdl_s32(__p1, __p2);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
- int32x4_t __ret;
- __ret = __p0 + vabdl_s16(__p1, __p2);
- return __ret;
- }
- #else
- __ai int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __rev0 + __noswap_vabdl_s16(__rev1, __rev2);
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- __ai int32x4_t __noswap_vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
- int32x4_t __ret;
- __ret = __p0 + __noswap_vabdl_s16(__p1, __p2);
- return __ret;
- }
- #endif
- #if defined(__aarch64__)
- #ifdef __LITTLE_ENDIAN__
- __ai uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
- uint16x8_t __ret;
- __ret = vabal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2));
- return __ret;
- }
- #else
- __ai uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
- uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __ret;
- __ret = __noswap_vabal_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
- uint64x2_t __ret;
- __ret = vabal_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2));
- return __ret;
- }
- #else
- __ai uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
- uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- uint64x2_t __ret;
- __ret = __noswap_vabal_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
- uint32x4_t __ret;
- __ret = vabal_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2));
- return __ret;
- }
- #else
- __ai uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
- uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- uint32x4_t __ret;
- __ret = __noswap_vabal_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
- int16x8_t __ret;
- __ret = vabal_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2));
- return __ret;
- }
- #else
- __ai int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
- int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __ret;
- __ret = __noswap_vabal_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
- int64x2_t __ret;
- __ret = vabal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
- return __ret;
- }
- #else
- __ai int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
- int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
- int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
- int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
- int64x2_t __ret;
- __ret = __noswap_vabal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
- return __ret;
- }
- #endif
- #ifdef __LITTLE_ENDIAN__
- __ai int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
- int32x4_t __ret;
- __ret = vabal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
- return __ret;
- }
- #else
- __ai int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
- int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
- int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
- int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
- int32x4_t __ret;
- __ret = __noswap_vabal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
- __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
- return __ret;
- }
- #endif
- #endif
- #undef __ai
- #endif /* __ARM_NEON_H */