#include <cuda_fp16.h>
#include <cuda_runtime.h>

#include <torch/all.h>
#include <torch/python.h>
#include <c10/cuda/CUDAGuard.h>

#define QK_K 256
#define K_QUANTS_PER_ITERATION 2
#define WARP_SIZE 32
#define K_SCALE_SIZE 12
#define CUDA_DEQUANTIZE_BLOCK_SIZE 256
#define CUDA_QUANTIZE_BLOCK_SIZE 256
#define GGML_CUDA_DMMV_X 32
#define GGML_CUDA_MMV_Y 1

// Data structures
// QK = number of values after dequantization
// QR = QK / number of values before dequantization
// QI = number of 32 bit integers before dequantization
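// Worked example with the Q4_0 layout defined below: QK4_0 = 32 weights per
// block after dequantization; QR4_0 = 2 because each quantized byte expands
// to two values; QI4_0 = QK4_0 / (4 * QR4_0) = 4, i.e. the 16 quant bytes of
// a block are consumed as four 32-bit integers.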
#define QK4_0 32
#define QR4_0 2
#define QI4_0 (QK4_0 / (4 * QR4_0))
typedef struct {
  half d;                 // delta
  uint8_t qs[QK4_0 / 2];  // nibbles / quants
} block_q4_0;
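
// Illustrative sketch, not used by the kernels in this file (mirrors the
// upstream ggml reference): dequantizing one block_q4_0. Each byte packs two
// 4-bit quants, both biased by 8, so values lie in [-8, 7] before scaling.
static __device__ void dequantize_block_q4_0_sketch(const block_q4_0* x, float* y) {
  const float d = __half2float(x->d);
  for (int j = 0; j < QK4_0 / 2; ++j) {
    y[j] = d * ((x->qs[j] & 0x0F) - 8);            // low nibble -> first half
    y[j + QK4_0 / 2] = d * ((x->qs[j] >> 4) - 8);  // high nibble -> second half
  }
}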
#define QK4_1 32
#define QR4_1 2
#define QI4_1 (QK4_1 / (4 * QR4_1))
typedef struct {
  half2 dm;               // dm.x = delta, dm.y = min
  uint8_t qs[QK4_1 / 2];  // nibbles / quants
} block_q4_1;
#define QK5_0 32
#define QR5_0 2
#define QI5_0 (QK5_0 / (4 * QR5_0))
typedef struct {
  half d;                 // delta
  uint8_t qh[4];          // 5th bit of quants
  uint8_t qs[QK5_0 / 2];  // nibbles / quants
} block_q5_0;
#define QK5_1 32
#define QR5_1 2
#define QI5_1 (QK5_1 / (4 * QR5_1))
typedef struct {
  half2 dm;               // dm.x = delta, dm.y = min
  uint8_t qh[4];          // 5th bit of quants
  uint8_t qs[QK5_1 / 2];  // nibbles / quants
} block_q5_1;
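
// Sketch of how the packed fifth bits are recovered (an assumption, mirroring
// the upstream ggml reference for Q5_0): bit j of qh extends the low nibble
// of byte j, bit j+16 extends the high nibble, giving 5-bit values in [0, 31]
// that are re-centered by -16.
static __device__ void dequantize_block_q5_0_sketch(const block_q5_0* x, float* y) {
  const float d = __half2float(x->d);
  const uint32_t qh = (uint32_t)x->qh[0] | ((uint32_t)x->qh[1] << 8) |
                      ((uint32_t)x->qh[2] << 16) | ((uint32_t)x->qh[3] << 24);
  for (int j = 0; j < QK5_0 / 2; ++j) {
    const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;  // fifth bit of quant j
    const uint8_t xh_1 = (qh >> (j + 12)) & 0x10;        // fifth bit of quant j+16
    y[j] = d * (((x->qs[j] & 0x0F) | xh_0) - 16);
    y[j + QK5_0 / 2] = d * (((x->qs[j] >> 4) | xh_1) - 16);
  }
}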
#define QK8_0 32
#define QR8_0 1
#define QI8_0 (QK8_0 / (4 * QR8_0))
typedef struct {
  half d;            // delta
  int8_t qs[QK8_0];  // quants
} block_q8_0;
#define QK8_1 32
#define QR8_1 1
#define QI8_1 (QK8_1 / (4 * QR8_1))
typedef struct {
  half2 ds;          // ds.x = delta, ds.y = sum
  int8_t qs[QK8_1];  // quants
} block_q8_1;
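
// Q8_0 dequantization is simply y[j] = d * qs[j]. For Q8_1, ds.y caches a
// per-block sum of the quants (pre-scaled by the delta in upstream ggml) so
// that dot products against Q4_1/Q5_1 blocks can fold in the per-block
// minimum with one multiply-add instead of 32.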
#define QR2_K 4
#define QI2_K (QK_K / (4 * QR2_K))
typedef struct {
  uint8_t scales[QK_K / 16];  // scales and mins, quantized with 4 bits
  uint8_t qs[QK_K / 4];       // quants
  half2 dm;                   // super-block scale for quantized scales/mins
} block_q2_K;
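
// Sketch of the Q2_K sub-block decode (an assumption, mirroring the upstream
// ggml reference): each scales[] byte packs a 4-bit scale (low nibble) and a
// 4-bit min (high nibble), and dm holds the fp16 super-block factors for
// each, so for a 2-bit quant q in [0, 3]:
//   y = __low2float(dm) * (scales[i] & 0xF) * q
//     - __high2float(dm) * (scales[i] >> 4)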
#define QR3_K 4
#define QI3_K (QK_K / (4 * QR3_K))
typedef struct {
  uint8_t hmask[QK_K / 8];       // quants - high bit
  uint8_t qs[QK_K / 4];          // quants - low 2 bits
  uint8_t scales[K_SCALE_SIZE];  // scales, quantized with 6 bits
  half d;                        // super-block scale
} block_q3_K;
#define QR4_K 2
#define QI4_K (QK_K / (4 * QR4_K))
typedef struct {
  half2 dm;                       // super-block scale for quantized scales/mins
  uint8_t scales[3 * QK_K / 64];  // scales, quantized with 6 bits
  uint8_t qs[QK_K / 2];           // 4-bit quants
} block_q4_K;
#define QR5_K 2
#define QI5_K (QK_K / (4 * QR5_K))
typedef struct {
  half2 dm;                      // super-block scale for quantized scales/mins
  uint8_t scales[K_SCALE_SIZE];  // scales and mins, quantized with 6 bits
  uint8_t qh[QK_K / 8];          // quants, high bit
  uint8_t qs[QK_K / 2];          // quants, low 4 bits
} block_q5_K;
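
// The 12 scale bytes of Q4_K/Q5_K pack eight 6-bit scales and eight 6-bit
// mins. A sketch of the unpacking (an assumption, mirroring upstream ggml's
// get_scale_min_k4): the first four pairs sit in the low 6 bits of bytes 0-7,
// and the last four are split between bytes 8-11 and the high 2 bits of
// bytes 0-7.
static __device__ void get_scale_min_k4_sketch(int j, const uint8_t* q,
                                               uint8_t* d, uint8_t* m) {
  if (j < 4) {
    *d = q[j] & 63;
    *m = q[j + 4] & 63;
  } else {
    *d = (q[j + 4] & 0xF) | ((q[j - 4] >> 6) << 4);
    *m = (q[j + 4] >> 4) | ((q[j] >> 6) << 4);
  }
}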
#define QR6_K 2
#define QI6_K (QK_K / (4 * QR6_K))
typedef struct {
  uint8_t ql[QK_K / 2];      // quants, lower 4 bits
  uint8_t qh[QK_K / 4];      // quants, upper 2 bits
  int8_t scales[QK_K / 16];  // scales
  half d;                    // delta
} block_q6_K;
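
// Q6_K decode sketch (an assumption, mirroring upstream ggml): each 6-bit
// quant is reassembled from a low nibble in ql[] and two high bits in qh[],
// then re-centered and scaled by the signed 8-bit sub-block scale:
//   q = (ql & 0xF) | (((qh >> shift) & 3) << 4);  // q in [0, 63]
//   y = __half2float(d) * scales[sub_block] * (q - 32);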
#define QR2_XXS 8
#define QI2_XXS (QK_K / (4 * QR2_XXS))
typedef struct {
  half d;
  uint16_t qs[QK_K / 8];
} block_iq2_xxs;
#define QR2_XS 8
#define QI2_XS (QK_K / (4 * QR2_XS))
typedef struct {
  half d;
  uint16_t qs[QK_K / 8];
  uint8_t scales[QK_K / 32];
} block_iq2_xs;
#define QR2_S 8
#define QI2_S (QK_K / (4 * QR2_S))
typedef struct {
  half d;
  uint8_t qs[QK_K / 4];
  uint8_t qh[QK_K / 32];
  uint8_t scales[QK_K / 32];
} block_iq2_s;
#define QR3_XXS 8
#define QI3_XXS (QK_K / (4 * QR3_XXS))
typedef struct {
  half d;
  uint8_t qs[3 * (QK_K / 8)];
} block_iq3_xxs;
#define QR3_XS 8
#define QI3_XS (QK_K / (4 * QR3_XS))
#define IQ3S_N_SCALE (QK_K / 64)
typedef struct {
  half d;
  uint8_t qs[QK_K / 4];
  uint8_t qh[QK_K / 32];
  uint8_t signs[QK_K / 8];
  uint8_t scales[IQ3S_N_SCALE];
} block_iq3_s;
#define QR1_S 8
#define QI1_S (QK_K / (4 * QR1_S))
typedef struct {
  half d;
  uint8_t qs[QK_K / 8];
  uint8_t scales[QK_K / 16];
} block_iq1_s;
#define QK4_NL 32
#define QR4_NL 2
#define QI4_NL (QK4_NL / (4 * QR4_NL))
typedef struct {
  half d;
  uint8_t qs[QK4_NL / 2];
} block_iq4_nl;
#define QR4_XS 8
#define QI4_XS (QK_K / (4 * QR4_XS))
typedef struct {
  half d;
  uint16_t scales_h;
  uint8_t scales_l[QK_K / 64];
  uint8_t qs[QK_K / 2];
} block_iq4_xs;
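
// Unlike the linear formats above, IQ4_NL and IQ4_XS map each 4-bit index
// through a fixed nonlinear codebook (kvalues_iq4nl in upstream ggml) before
// applying the block scale; the 16 codebook values are signed and spaced more
// densely near zero. IQ4_XS additionally carries per-sub-block scales split
// between scales_h and scales_l.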
static const __device__ uint64_t iq2xxs_grid[256] = {
    0x0808080808080808, 0x080808080808082b, 0x0808080808081919, 0x0808080808082b08,
    0x0808080808082b2b, 0x0808080808190819, 0x0808080808191908, 0x08080808082b0808,
    0x08080808082b082b, 0x08080808082b2b08, 0x08080808082b2b2b, 0x0808080819080819,
    0x0808080819081908, 0x0808080819190808, 0x0808080819192b08, 0x08080808192b0819,
    0x08080808192b1908, 0x080808082b080808, 0x080808082b08082b, 0x080808082b082b2b,
    0x080808082b2b082b, 0x0808081908080819, 0x0808081908081908, 0x0808081908190808,
    0x0808081908191919, 0x0808081919080808, 0x080808192b081908, 0x080808192b192b08,
    0x0808082b08080808, 0x0808082b0808082b, 0x0808082b082b082b, 0x0808082b2b08082b,
    0x0808190808080819, 0x0808190808081908, 0x0808190808190808, 0x08081908082b0819,
    0x08081908082b1908, 0x0808190819080808, 0x080819081908082b, 0x0808190819082b08,
    0x08081908192b0808, 0x080819082b080819, 0x080819082b081908, 0x080819082b190808,
    0x080819082b2b1908, 0x0808191908080808, 0x080819190808082b, 0x0808191908082b08,
    0x08081919082b0808, 0x080819191908192b, 0x08081919192b2b19, 0x080819192b080808,
    0x080819192b190819, 0x0808192b08082b19, 0x0808192b08190808, 0x0808192b19080808,
    0x0808192b2b081908, 0x0808192b2b2b1908, 0x08082b0808080808, 0x08082b0808081919,
    0x08082b0808082b08, 0x08082b0808191908, 0x08082b08082b2b08, 0x08082b0819080819,
    0x08082b0819081908, 0x08082b0819190808, 0x08082b081919082b, 0x08082b082b082b08,
    0x08082b1908081908, 0x08082b1919080808, 0x08082b2b0808082b, 0x08082b2b08191908,
    0x0819080808080819, 0x0819080808081908, 0x0819080808190808, 0x08190808082b0819,
    0x0819080819080808, 0x08190808192b0808, 0x081908082b081908, 0x081908082b190808,
    0x081908082b191919, 0x0819081908080808, 0x0819081908082b08, 0x08190819082b0808,
    0x0819081919190808, 0x0819081919192b2b, 0x081908192b080808, 0x0819082b082b1908,
    0x0819082b19081919, 0x0819190808080808, 0x0819190808082b08, 0x08191908082b0808,
    0x08191908082b1919, 0x0819190819082b19, 0x081919082b080808, 0x0819191908192b08,
    0x08191919192b082b, 0x0819192b08080808, 0x0819192b0819192b, 0x08192b0808080819,
    0x08192b0808081908, 0x08192b0808190808, 0x08192b0819080808, 0x08192b082b080819,
    0x08192b1908080808, 0x08192b1908081919, 0x08192b192b2b0808, 0x08192b2b19190819,
    0x082b080808080808, 0x082b08080808082b, 0x082b080808082b2b, 0x082b080819081908,
    0x082b0808192b0819, 0x082b08082b080808, 0x082b08082b08082b, 0x082b0819082b2b19,
    0x082b081919082b08, 0x082b082b08080808, 0x082b082b0808082b, 0x082b190808080819,
    0x082b190808081908, 0x082b190808190808, 0x082b190819080808, 0x082b19081919192b,
    0x082b191908080808, 0x082b191919080819, 0x082b1919192b1908, 0x082b192b2b190808,
    0x082b2b0808082b08, 0x082b2b08082b0808, 0x082b2b082b191908, 0x082b2b2b19081908,
    0x1908080808080819, 0x1908080808081908, 0x1908080808190808, 0x1908080808192b08,
    0x19080808082b0819, 0x19080808082b1908, 0x1908080819080808, 0x1908080819082b08,
    0x190808081919192b, 0x19080808192b0808, 0x190808082b080819, 0x190808082b081908,
    0x190808082b190808, 0x1908081908080808, 0x19080819082b0808, 0x19080819192b0819,
    0x190808192b080808, 0x190808192b081919, 0x1908082b08080819, 0x1908082b08190808,
    0x1908082b19082b08, 0x1908082b1919192b, 0x1908082b192b2b08, 0x1908190808080808,
    0x1908190808082b08, 0x19081908082b0808, 0x190819082b080808, 0x190819082b192b19,
    0x190819190819082b, 0x19081919082b1908, 0x1908192b08080808, 0x19082b0808080819,
    0x19082b0808081908, 0x19082b0808190808, 0x19082b0819080808, 0x19082b0819081919,
    0x19082b1908080808, 0x19082b1919192b08, 0x19082b19192b0819, 0x19082b192b08082b,
    0x19082b2b19081919, 0x19082b2b2b190808, 0x1919080808080808, 0x1919080808082b08,
    0x1919080808190819, 0x1919080808192b19, 0x19190808082b0808, 0x191908082b080808,
    0x191908082b082b08, 0x1919081908081908, 0x191908191908082b, 0x191908192b2b1908,
    0x1919082b2b190819, 0x191919082b190808, 0x191919082b19082b, 0x1919191908082b2b,
    0x1919192b08080819, 0x1919192b19191908, 0x19192b0808080808, 0x19192b0808190819,
    0x19192b0808192b19, 0x19192b08192b1908, 0x19192b1919080808, 0x19192b2b08082b08,
    0x192b080808081908, 0x192b080808190808, 0x192b080819080808, 0x192b0808192b2b08,
    0x192b081908080808, 0x192b081919191919, 0x192b082b08192b08, 0x192b082b192b0808,
    0x192b190808080808, 0x192b190808081919, 0x192b191908190808, 0x192b19190819082b,
    0x192b19192b081908, 0x192b2b081908082b, 0x2b08080808080808, 0x2b0808080808082b,
    0x2b08080808082b2b, 0x2b08080819080819, 0x2b0808082b08082b, 0x2b08081908081908,
    0x2b08081908192b08, 0x2b08081919080808, 0x2b08082b08190819, 0x2b08190808080819,
    0x2b08190808081908, 0x2b08190808190808, 0x2b08190808191919, 0x2b08190819080808,
    0x2b081908192b0808, 0x2b08191908080808, 0x2b0819191908192b, 0x2b0819192b191908,
    0x2b08192b08082b19, 0x2b08192b19080808, 0x2b08192b192b0808, 0x2b082b080808082b,
    0x2b082b1908081908, 0x2b082b2b08190819, 0x2b19080808081908, 0x2b19080808190808,
    0x2b190808082b1908, 0x2b19080819080808, 0x2b1908082b2b0819, 0x2b1908190819192b,
    0x2b1908192b080808, 0x2b19082b19081919, 0x2b19190808080808, 0x2b191908082b082b,
    0x2b19190819081908, 0x2b19191919190819, 0x2b192b082b080819, 0x2b192b19082b0808,
    0x2b2b08080808082b, 0x2b2b080819190808, 0x2b2b08082b081919, 0x2b2b081908082b19,
    0x2b2b082b08080808, 0x2b2b190808192b08, 0x2b2b2b0819190808, 0x2b2b2b1908081908,
};
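
// Observation on the table above: every byte of every entry is 0x08, 0x19 or
// 0x2b, so each 64-bit row encodes the magnitudes of 8 consecutive weights at
// one of three levels. During IQ2_XXS dequantization a byte drawn from
// block_iq2_xxs::qs selects a row, and separately packed sign bits and a
// 4-bit scale are applied on top.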
static const __device__ uint64_t iq2xs_grid[512] = {
    0x0808080808080808, 0x080808080808082b, 0x0808080808081919, 0x0808080808082b08,
    0x0808080808082b2b, 0x0808080808190819, 0x0808080808191908, 0x080808080819192b,
    0x0808080808192b19, 0x08080808082b0808, 0x08080808082b082b, 0x08080808082b1919,
    0x08080808082b2b08, 0x0808080819080819, 0x0808080819081908, 0x080808081908192b,
    0x0808080819082b19, 0x0808080819190808, 0x080808081919082b, 0x0808080819191919,
    0x0808080819192b08, 0x08080808192b0819, 0x08080808192b1908, 0x080808082b080808,
    0x080808082b08082b, 0x080808082b081919, 0x080808082b082b08, 0x080808082b190819,
    0x080808082b191908, 0x080808082b192b19, 0x080808082b2b0808, 0x0808081908080819,
    0x0808081908081908, 0x080808190808192b, 0x0808081908082b19, 0x0808081908190808,
    0x080808190819082b, 0x0808081908191919, 0x0808081908192b08, 0x0808081908192b2b,
    0x08080819082b0819, 0x08080819082b1908, 0x0808081919080808, 0x080808191908082b,
    0x0808081919081919, 0x0808081919082b08, 0x0808081919190819, 0x0808081919191908,
    0x08080819192b0808, 0x08080819192b2b08, 0x080808192b080819, 0x080808192b081908,
    0x080808192b190808, 0x0808082b08080808, 0x0808082b0808082b, 0x0808082b08081919,
    0x0808082b08082b08, 0x0808082b08190819, 0x0808082b08191908, 0x0808082b082b0808,
    0x0808082b19080819, 0x0808082b19081908, 0x0808082b19190808, 0x0808082b19191919,
    0x0808082b2b080808, 0x0808082b2b082b2b, 0x0808190808080819, 0x0808190808081908,
    0x080819080808192b, 0x0808190808082b19, 0x0808190808190808, 0x080819080819082b,
    0x0808190808191919, 0x0808190808192b08, 0x08081908082b0819, 0x08081908082b1908,
    0x0808190819080808, 0x080819081908082b, 0x0808190819081919, 0x0808190819082b08,
    0x0808190819190819, 0x0808190819191908, 0x080819081919192b, 0x08081908192b0808,
    0x080819082b080819, 0x080819082b081908, 0x080819082b190808, 0x0808191908080808,
    0x080819190808082b, 0x0808191908081919, 0x0808191908082b08, 0x0808191908190819,
    0x0808191908191908, 0x08081919082b0808, 0x0808191919080819, 0x0808191919081908,
    0x0808191919190808, 0x08081919192b0819, 0x080819192b080808, 0x0808192b08080819,
    0x0808192b08081908, 0x0808192b08190808, 0x0808192b082b192b, 0x0808192b19080808,
    0x0808192b1908082b, 0x0808192b2b081908, 0x08082b0808080808, 0x08082b080808082b,
    0x08082b0808081919, 0x08082b0808082b08, 0x08082b0808082b2b, 0x08082b0808190819,
    0x08082b0808191908, 0x08082b08082b0808, 0x08082b08082b1919, 0x08082b0819080819,
    0x08082b0819081908, 0x08082b0819190808, 0x08082b0819192b08, 0x08082b082b080808,
    0x08082b082b2b0808, 0x08082b082b2b2b2b, 0x08082b1908080819, 0x08082b1908081908,
    0x08082b1908190808, 0x08082b1919080808, 0x08082b192b080819, 0x08082b192b082b19,
    0x08082b2b08080808, 0x08082b2b082b0808, 0x08082b2b082b2b08, 0x08082b2b2b19192b,
    0x08082b2b2b2b0808, 0x0819080808080819, 0x0819080808081908, 0x081908080808192b,
    0x0819080808082b19, 0x0819080808190808, 0x081908080819082b, 0x0819080808191919,
    0x0819080808192b08, 0x08190808082b0819, 0x08190808082b1908, 0x0819080819080808,
    0x081908081908082b, 0x0819080819081919, 0x0819080819082b08, 0x0819080819190819,
    0x0819080819191908, 0x08190808192b0808, 0x08190808192b2b2b, 0x081908082b080819,
    0x081908082b081908, 0x081908082b190808, 0x0819081908080808, 0x081908190808082b,
    0x0819081908081919, 0x0819081908082b08, 0x0819081908190819, 0x0819081908191908,
    0x08190819082b0808, 0x0819081919080819, 0x0819081919081908, 0x0819081919190808,
    0x081908192b080808, 0x081908192b191908, 0x081908192b19192b, 0x0819082b08080819,
    0x0819082b08081908, 0x0819082b0808192b, 0x0819082b08190808, 0x0819082b19080808,
    0x0819082b192b0808, 0x0819190808080808, 0x081919080808082b, 0x0819190808081919,
    0x0819190808082b08, 0x0819190808190819, 0x0819190808191908, 0x08191908082b0808,
    0x0819190819080819, 0x0819190819081908, 0x0819190819082b19, 0x0819190819190808,
    0x08191908192b1908, 0x081919082b080808, 0x0819191908080819, 0x0819191908081908,
    0x0819191908190808, 0x0819191919080808, 0x0819192b08080808, 0x0819192b08191908,
    0x0819192b19082b19, 0x08192b0808080819, 0x08192b0808081908, 0x08192b0808190808,
    0x08192b080819082b, 0x08192b0819080808, 0x08192b0819191908, 0x08192b082b08192b,
    0x08192b1908080808, 0x08192b1908081919, 0x08192b19192b192b, 0x08192b2b19190819,
    0x08192b2b2b2b2b19, 0x082b080808080808, 0x082b08080808082b, 0x082b080808081919,
    0x082b080808082b08, 0x082b080808082b2b, 0x082b080808190819, 0x082b080808191908,
    0x082b0808082b0808, 0x082b080819080819, 0x082b080819081908, 0x082b080819190808,
    0x082b08082b080808, 0x082b08082b2b0808, 0x082b081908080819, 0x082b081908081908,
    0x082b081908190808, 0x082b081919080808, 0x082b081919082b08, 0x082b0819192b1919,
    0x082b082b08080808, 0x082b082b082b082b, 0x082b082b2b080808, 0x082b082b2b2b2b08,
    0x082b190808080819, 0x082b190808081908, 0x082b190808190808, 0x082b1908082b2b19,
    0x082b190819080808, 0x082b191908080808, 0x082b191919080819, 0x082b19191919082b,
    0x082b19192b192b19, 0x082b192b08080819, 0x082b192b08192b2b, 0x082b192b2b2b192b,
    0x082b2b0808080808, 0x082b2b0808082b08, 0x082b2b0808082b2b, 0x082b2b08082b0808,
    0x082b2b0819191919, 0x082b2b082b082b08, 0x082b2b082b2b082b, 0x082b2b19192b2b08,
    0x082b2b192b190808, 0x082b2b2b08082b08, 0x082b2b2b082b0808, 0x082b2b2b2b08082b,
    0x082b2b2b2b082b08, 0x082b2b2b2b082b2b, 0x1908080808080819, 0x1908080808081908,
    0x190808080808192b, 0x1908080808082b19, 0x1908080808190808, 0x190808080819082b,
    0x1908080808191919, 0x1908080808192b08, 0x19080808082b0819, 0x19080808082b1908,
    0x1908080819080808, 0x190808081908082b, 0x1908080819081919, 0x1908080819082b08,
    0x1908080819082b2b, 0x1908080819190819, 0x1908080819191908, 0x19080808192b0808,
    0x19080808192b1919, 0x190808082b080819, 0x190808082b081908, 0x190808082b190808,
    0x1908081908080808, 0x190808190808082b, 0x1908081908081919, 0x1908081908082b08,
    0x1908081908190819, 0x1908081908191908, 0x19080819082b0808, 0x1908081919080819,
    0x1908081919081908, 0x1908081919190808, 0x190808192b080808, 0x190808192b081919,
    0x190808192b2b082b, 0x1908082b08080819, 0x1908082b08081908, 0x1908082b08190808,
    0x1908082b0819082b, 0x1908082b082b2b19, 0x1908082b19080808, 0x1908190808080808,
    0x190819080808082b, 0x1908190808081919, 0x1908190808082b08, 0x1908190808190819,
    0x1908190808191908, 0x1908190808192b19, 0x19081908082b0808, 0x1908190819080819,
    0x1908190819081908, 0x1908190819190808, 0x190819082b080808, 0x190819082b191908,
    0x1908191908080819, 0x1908191908081908, 0x1908191908190808, 0x19081919082b1908,
    0x1908191919080808, 0x190819192b192b2b, 0x1908192b08080808, 0x1908192b08082b2b,
    0x1908192b19081908, 0x1908192b19190808, 0x19082b0808080819, 0x19082b0808081908,
    0x19082b0808190808, 0x19082b0819080808, 0x19082b0819081919, 0x19082b0819191908,
    0x19082b08192b082b, 0x19082b1908080808, 0x19082b1908190819, 0x19082b1919081908,
    0x19082b1919190808, 0x19082b19192b2b19, 0x19082b2b08081908, 0x1919080808080808,
    0x191908080808082b, 0x1919080808081919, 0x1919080808082b08, 0x1919080808190819,
    0x1919080808191908, 0x19190808082b0808, 0x19190808082b2b08, 0x1919080819080819,
    0x1919080819081908, 0x1919080819190808, 0x191908082b080808, 0x1919081908080819,
    0x1919081908081908, 0x1919081908190808, 0x1919081908191919, 0x1919081919080808,
    0x191908191908082b, 0x1919082b08080808, 0x1919082b19081908, 0x1919082b2b2b2b2b,
    0x1919190808080819, 0x1919190808081908, 0x1919190808190808, 0x19191908082b0819,
    0x1919190819080808, 0x19191908192b0808, 0x191919082b080819, 0x191919082b2b0819,
    0x1919191908080808, 0x1919191908082b08, 0x191919192b080808, 0x191919192b082b08,
    0x1919192b082b0819, 0x1919192b192b2b08, 0x1919192b2b2b0819, 0x19192b0808080808,
    0x19192b0808191908, 0x19192b0819080819, 0x19192b0819190808, 0x19192b082b192b19,
    0x19192b1908192b2b, 0x19192b1919080808, 0x19192b191908082b, 0x19192b2b2b081919,
    0x192b080808080819, 0x192b080808081908, 0x192b080808190808, 0x192b080819080808,
    0x192b080819191908, 0x192b0808192b082b, 0x192b08082b08192b, 0x192b08082b2b2b19,
    0x192b081908080808, 0x192b082b082b1908, 0x192b082b19082b2b, 0x192b082b2b19082b,
    0x192b190808080808, 0x192b19080819192b, 0x192b191908190808, 0x192b191919080808,
    0x192b191919081919, 0x192b19192b2b1908, 0x192b2b0808080819, 0x192b2b08192b2b2b,
    0x192b2b19082b1919, 0x192b2b2b0808192b, 0x192b2b2b19191908, 0x192b2b2b192b082b,
    0x2b08080808080808, 0x2b0808080808082b, 0x2b08080808081919, 0x2b08080808082b08,
    0x2b08080808190819, 0x2b08080808191908, 0x2b080808082b0808, 0x2b080808082b2b2b,
    0x2b08080819080819, 0x2b08080819081908, 0x2b08080819190808, 0x2b0808082b080808,
    0x2b0808082b08082b, 0x2b0808082b2b2b08, 0x2b0808082b2b2b2b, 0x2b08081908080819,
    0x2b08081908081908, 0x2b0808190808192b, 0x2b08081908190808, 0x2b08081919080808,
    0x2b08081919190819, 0x2b08081919192b19, 0x2b08082b08080808, 0x2b08082b082b0808,
    0x2b08082b2b080808, 0x2b08082b2b08082b, 0x2b08082b2b2b0808, 0x2b08082b2b2b2b08,
    0x2b08190808080819, 0x2b08190808081908, 0x2b08190808190808, 0x2b0819080819082b,
    0x2b08190808191919, 0x2b08190819080808, 0x2b081908192b0808, 0x2b0819082b082b19,
    0x2b08191908080808, 0x2b08191919081908, 0x2b0819192b2b1919, 0x2b08192b08192b08,
    0x2b08192b192b2b2b, 0x2b082b0808080808, 0x2b082b0808082b08, 0x2b082b08082b1919,
    0x2b082b0819192b2b, 0x2b082b082b080808, 0x2b082b082b08082b, 0x2b082b082b2b2b08,
    0x2b082b190808192b, 0x2b082b2b082b082b, 0x2b082b2b2b080808, 0x2b082b2b2b082b08,
    0x2b082b2b2b19192b, 0x2b082b2b2b2b2b08, 0x2b19080808080819, 0x2b19080808081908,
    0x2b19080808190808, 0x2b19080819080808, 0x2b1908081919192b, 0x2b1908082b081908,
    0x2b19081908080808, 0x2b190819082b082b, 0x2b190819192b1908, 0x2b19082b1919192b,
    0x2b19082b2b082b19, 0x2b19190808080808, 0x2b19190808081919, 0x2b19190819081908,
    0x2b19190819190808, 0x2b19190819192b08, 0x2b191919082b2b19, 0x2b1919192b190808,
    0x2b1919192b19082b, 0x2b19192b19080819, 0x2b192b0819190819, 0x2b192b082b2b192b,
    0x2b192b1919082b19, 0x2b192b2b08191919, 0x2b192b2b192b0808, 0x2b2b080808080808,
    0x2b2b08080808082b, 0x2b2b080808082b08, 0x2b2b080808082b2b, 0x2b2b0808082b0808,
    0x2b2b0808082b2b2b, 0x2b2b08082b2b0808, 0x2b2b081919190819, 0x2b2b081919192b19,
    0x2b2b08192b2b192b, 0x2b2b082b08080808, 0x2b2b082b0808082b, 0x2b2b082b08082b08,
    0x2b2b082b082b2b2b, 0x2b2b082b2b080808, 0x2b2b082b2b2b0808, 0x2b2b190819080808,
    0x2b2b19082b191919, 0x2b2b192b192b1919, 0x2b2b192b2b192b08, 0x2b2b2b0808082b2b,
    0x2b2b2b08082b0808, 0x2b2b2b08082b082b, 0x2b2b2b08082b2b08, 0x2b2b2b082b2b0808,
    0x2b2b2b082b2b2b08, 0x2b2b2b1908081908, 0x2b2b2b192b081908, 0x2b2b2b192b08192b,
    0x2b2b2b2b082b2b08, 0x2b2b2b2b082b2b2b, 0x2b2b2b2b2b190819, 0x2b2b2b2b2b2b2b2b,
};
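
// iq2xs_grid doubles the codebook to 512 rows: in IQ2_XS each uint16_t of qs
// holds a 9-bit row index in its low bits, with the remaining bits selecting
// a sign pattern (per the upstream ggml decode).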
static const __device__ uint64_t iq2s_grid[1024] = {
    0x0808080808080808, 0x080808080808082b, 0x0808080808081919, 0x0808080808082b08,
    0x0808080808082b2b, 0x0808080808190819, 0x0808080808191908, 0x080808080819192b,
    0x0808080808192b19, 0x08080808082b0808, 0x08080808082b082b, 0x08080808082b1919,
    0x08080808082b2b08, 0x0808080819080819, 0x0808080819081908, 0x080808081908192b,
    0x0808080819082b19, 0x0808080819190808, 0x080808081919082b, 0x0808080819191919,
    0x0808080819192b08, 0x08080808192b0819, 0x08080808192b1908, 0x08080808192b192b,
    0x08080808192b2b19, 0x080808082b080808, 0x080808082b08082b, 0x080808082b081919,
    0x080808082b082b08, 0x080808082b190819, 0x080808082b191908, 0x080808082b2b0808,
    0x080808082b2b1919, 0x080808082b2b2b2b, 0x0808081908080819, 0x0808081908081908,
    0x080808190808192b, 0x0808081908082b19, 0x0808081908190808, 0x080808190819082b,
    0x0808081908191919, 0x0808081908192b08, 0x08080819082b0819, 0x08080819082b1908,
    0x0808081919080808, 0x080808191908082b, 0x0808081919081919, 0x0808081919082b08,
    0x0808081919190819, 0x0808081919191908, 0x080808191919192b, 0x0808081919192b19,
    0x08080819192b0808, 0x08080819192b1919, 0x08080819192b2b08, 0x080808192b080819,
    0x080808192b081908, 0x080808192b190808, 0x080808192b19082b, 0x080808192b191919,
    0x080808192b2b0819, 0x080808192b2b1908, 0x0808082b08080808, 0x0808082b0808082b,
    0x0808082b08081919, 0x0808082b08082b08, 0x0808082b08190819, 0x0808082b08191908,
    0x0808082b082b0808, 0x0808082b082b2b2b, 0x0808082b19080819, 0x0808082b19081908,
    0x0808082b1908192b, 0x0808082b19082b19, 0x0808082b19190808, 0x0808082b19191919,
    0x0808082b2b080808, 0x0808082b2b081919, 0x0808082b2b082b2b, 0x0808082b2b191908,
    0x0808082b2b2b082b, 0x0808190808080819, 0x0808190808081908, 0x080819080808192b,
    0x0808190808082b19, 0x0808190808190808, 0x080819080819082b, 0x0808190808191919,
    0x0808190808192b08, 0x08081908082b0819, 0x08081908082b1908, 0x08081908082b192b,
    0x08081908082b2b19, 0x0808190819080808, 0x080819081908082b, 0x0808190819081919,
    0x0808190819082b08, 0x0808190819082b2b, 0x0808190819190819, 0x0808190819191908,
    0x080819081919192b, 0x0808190819192b19, 0x08081908192b0808, 0x08081908192b082b,
    0x08081908192b1919, 0x080819082b080819, 0x080819082b081908, 0x080819082b08192b,
    0x080819082b082b19, 0x080819082b190808, 0x080819082b191919, 0x080819082b192b08,
    0x080819082b2b0819, 0x080819082b2b1908, 0x0808191908080808, 0x080819190808082b,
    0x0808191908081919, 0x0808191908082b08, 0x0808191908082b2b, 0x0808191908190819,
    0x0808191908191908, 0x080819190819192b, 0x0808191908192b19, 0x08081919082b0808,
    0x08081919082b1919, 0x08081919082b2b08, 0x0808191919080819, 0x0808191919081908,
    0x080819191908192b, 0x0808191919082b19, 0x0808191919190808, 0x080819191919082b,
    0x0808191919191919, 0x0808191919192b08, 0x08081919192b0819, 0x08081919192b1908,
    0x080819192b080808, 0x080819192b08082b, 0x080819192b081919, 0x080819192b082b08,
    0x080819192b190819, 0x080819192b191908, 0x080819192b2b0808, 0x0808192b08080819,
    0x0808192b08081908, 0x0808192b0808192b, 0x0808192b08082b19, 0x0808192b08190808,
    0x0808192b08191919, 0x0808192b19080808, 0x0808192b19081919, 0x0808192b19082b08,
    0x0808192b19190819, 0x0808192b19191908, 0x0808192b192b0808, 0x0808192b2b080819,
    0x0808192b2b081908, 0x0808192b2b190808, 0x08082b0808080808, 0x08082b080808082b,
    0x08082b0808081919, 0x08082b0808082b08, 0x08082b0808190819, 0x08082b0808191908,
    0x08082b080819192b, 0x08082b0808192b19, 0x08082b08082b0808, 0x08082b08082b1919,
    0x08082b08082b2b2b, 0x08082b0819080819, 0x08082b0819081908, 0x08082b081908192b,
    0x08082b0819082b19, 0x08082b0819190808, 0x08082b081919082b, 0x08082b0819191919,
    0x08082b0819192b08, 0x08082b08192b0819, 0x08082b08192b1908, 0x08082b082b080808,
    0x08082b082b081919, 0x08082b082b191908, 0x08082b082b2b2b2b, 0x08082b1908080819,
    0x08082b1908081908, 0x08082b1908190808, 0x08082b190819082b, 0x08082b1908191919,
    0x08082b1908192b08, 0x08082b19082b0819, 0x08082b1919080808, 0x08082b1919081919,
    0x08082b1919082b08, 0x08082b1919190819, 0x08082b1919191908, 0x08082b19192b0808,
    0x08082b192b080819, 0x08082b192b190808, 0x08082b2b08080808, 0x08082b2b08190819,
    0x08082b2b08191908, 0x08082b2b082b082b, 0x08082b2b082b2b08, 0x08082b2b082b2b2b,
    0x08082b2b19190808, 0x08082b2b2b192b19, 0x0819080808080819, 0x0819080808081908,
    0x081908080808192b, 0x0819080808082b19, 0x0819080808190808, 0x081908080819082b,
    0x0819080808191919, 0x0819080808192b08, 0x08190808082b0819, 0x08190808082b1908,
    0x08190808082b192b, 0x0819080819080808, 0x081908081908082b, 0x0819080819081919,
    0x0819080819082b08, 0x0819080819190819, 0x0819080819191908, 0x081908081919192b,
    0x0819080819192b19, 0x08190808192b0808, 0x08190808192b082b, 0x08190808192b1919,
    0x08190808192b2b08, 0x081908082b080819, 0x081908082b081908, 0x081908082b08192b,
    0x081908082b190808, 0x081908082b191919, 0x081908082b192b08, 0x081908082b2b0819,
    0x081908082b2b1908, 0x0819081908080808, 0x081908190808082b, 0x0819081908081919,
    0x0819081908082b08, 0x0819081908082b2b, 0x0819081908190819, 0x0819081908191908,
    0x081908190819192b, 0x0819081908192b19, 0x08190819082b0808, 0x08190819082b082b,
    0x08190819082b1919, 0x08190819082b2b08, 0x0819081919080819, 0x0819081919081908,
    0x081908191908192b, 0x0819081919082b19, 0x0819081919190808, 0x081908191919082b,
    0x0819081919191919, 0x0819081919192b08, 0x08190819192b0819, 0x08190819192b1908,
    0x081908192b080808, 0x081908192b08082b, 0x081908192b081919, 0x081908192b082b08,
    0x081908192b190819, 0x081908192b191908, 0x0819082b08080819, 0x0819082b08081908,
    0x0819082b08082b19, 0x0819082b08190808, 0x0819082b08191919, 0x0819082b082b0819,
    0x0819082b082b1908, 0x0819082b19080808, 0x0819082b19081919, 0x0819082b19190819,
    0x0819082b19191908, 0x0819082b2b080819, 0x0819082b2b081908, 0x0819082b2b190808,
    0x0819190808080808, 0x081919080808082b, 0x0819190808081919, 0x0819190808082b08,
    0x0819190808190819, 0x0819190808191908, 0x081919080819192b, 0x0819190808192b19,
    0x08191908082b0808, 0x08191908082b1919, 0x08191908082b2b08, 0x0819190819080819,
    0x0819190819081908, 0x081919081908192b, 0x0819190819082b19, 0x0819190819190808,
    0x081919081919082b, 0x0819190819191919, 0x0819190819192b08, 0x08191908192b0819,
    0x08191908192b1908, 0x081919082b080808, 0x081919082b08082b, 0x081919082b081919,
    0x081919082b082b08, 0x081919082b190819, 0x081919082b191908, 0x081919082b2b0808,
    0x0819191908080819, 0x0819191908081908, 0x081919190808192b, 0x0819191908082b19,
    0x0819191908190808, 0x081919190819082b, 0x0819191908191919, 0x0819191908192b08,
    0x08191919082b0819, 0x08191919082b1908, 0x0819191919080808, 0x081919191908082b,
    0x0819191919081919, 0x0819191919082b08, 0x0819191919190819, 0x0819191919191908,
    0x08191919192b0808, 0x081919192b080819, 0x081919192b081908, 0x081919192b190808,
    0x0819192b08080808, 0x0819192b08081919, 0x0819192b08082b08, 0x0819192b08190819,
    0x0819192b08191908, 0x0819192b082b0808, 0x0819192b19080819, 0x0819192b19081908,
    0x0819192b19190808, 0x0819192b2b080808, 0x0819192b2b2b2b2b, 0x08192b0808080819,
    0x08192b0808081908, 0x08192b080808192b, 0x08192b0808082b19, 0x08192b0808190808,
    0x08192b0808191919, 0x08192b0808192b08, 0x08192b08082b0819, 0x08192b0819080808,
    0x08192b081908082b, 0x08192b0819081919, 0x08192b0819082b08, 0x08192b0819190819,
    0x08192b0819191908, 0x08192b08192b0808, 0x08192b082b080819, 0x08192b082b081908,
    0x08192b1908080808, 0x08192b190808082b, 0x08192b1908081919, 0x08192b1908082b08,
    0x08192b1908190819, 0x08192b1908191908, 0x08192b19082b0808, 0x08192b1919080819,
    0x08192b1919081908, 0x08192b1919190808, 0x08192b19192b2b19, 0x08192b192b2b082b,
    0x08192b2b08081908, 0x08192b2b08190808, 0x08192b2b19080808, 0x08192b2b1919192b,
    0x082b080808080808, 0x082b08080808082b, 0x082b080808081919, 0x082b080808082b08,
    0x082b080808190819, 0x082b080808191908, 0x082b08080819192b, 0x082b080808192b19,
    0x082b0808082b0808, 0x082b0808082b1919, 0x082b0808082b2b2b, 0x082b080819080819,
    0x082b080819081908, 0x082b080819190808, 0x082b08081919082b, 0x082b080819191919,
    0x082b0808192b1908, 0x082b08082b080808, 0x082b08082b082b2b, 0x082b08082b191908,
    0x082b08082b2b2b2b, 0x082b081908080819, 0x082b081908081908, 0x082b081908190808,
    0x082b08190819082b, 0x082b081908191919, 0x082b0819082b0819, 0x082b081919080808,
    0x082b08191908082b, 0x082b081919081919, 0x082b081919190819, 0x082b081919191908,
    0x082b0819192b0808, 0x082b08192b080819, 0x082b08192b081908, 0x082b08192b190808,
    0x082b082b08080808, 0x082b082b08082b2b, 0x082b082b082b082b, 0x082b082b082b2b08,
    0x082b082b082b2b2b, 0x082b082b19081908, 0x082b082b19190808, 0x082b082b2b082b08,
    0x082b082b2b082b2b, 0x082b082b2b2b2b08, 0x082b190808080819, 0x082b190808081908,
    0x082b19080808192b, 0x082b190808082b19, 0x082b190808190808, 0x082b190808191919,
    0x082b190808192b08, 0x082b1908082b0819, 0x082b1908082b1908, 0x082b190819080808,
    0x082b19081908082b, 0x082b190819081919, 0x082b190819082b08, 0x082b190819190819,
    0x082b190819191908, 0x082b1908192b0808, 0x082b19082b080819, 0x082b19082b081908,
    0x082b19082b190808, 0x082b191908080808, 0x082b191908081919, 0x082b191908082b08,
    0x082b191908190819, 0x082b191908191908, 0x082b1919082b0808, 0x082b191919080819,
    0x082b191919081908, 0x082b191919190808, 0x082b1919192b192b, 0x082b19192b080808,
    0x082b192b08080819, 0x082b192b08081908, 0x082b192b08190808, 0x082b192b19080808,
    0x082b192b19192b19, 0x082b2b0808080808, 0x082b2b0808081919, 0x082b2b0808190819,
    0x082b2b0808191908, 0x082b2b0819080819, 0x082b2b0819081908, 0x082b2b0819190808,
    0x082b2b082b082b2b, 0x082b2b082b2b2b2b, 0x082b2b1908080819, 0x082b2b1908081908,
    0x082b2b1908190808, 0x082b2b192b191919, 0x082b2b2b08082b2b, 0x082b2b2b082b082b,
    0x082b2b2b192b1908, 0x082b2b2b2b082b08, 0x082b2b2b2b082b2b, 0x1908080808080819,
    0x1908080808081908, 0x190808080808192b, 0x1908080808082b19, 0x1908080808190808,
    0x190808080819082b, 0x1908080808191919, 0x1908080808192b08, 0x1908080808192b2b,
    0x19080808082b0819, 0x19080808082b1908, 0x19080808082b192b, 0x1908080819080808,
    0x190808081908082b, 0x1908080819081919, 0x1908080819082b08, 0x1908080819082b2b,
    0x1908080819190819, 0x1908080819191908, 0x190808081919192b, 0x1908080819192b19,
    0x19080808192b0808, 0x19080808192b082b, 0x19080808192b1919, 0x190808082b080819,
    0x190808082b081908, 0x190808082b190808, 0x190808082b191919, 0x190808082b192b08,
    0x190808082b2b0819, 0x190808082b2b1908, 0x1908081908080808, 0x190808190808082b,
    0x1908081908081919, 0x1908081908082b08, 0x1908081908190819, 0x1908081908191908,
    0x190808190819192b, 0x1908081908192b19, 0x19080819082b0808, 0x19080819082b082b,
    0x19080819082b1919, 0x1908081919080819, 0x1908081919081908, 0x190808191908192b,
    0x1908081919082b19, 0x1908081919190808, 0x190808191919082b, 0x1908081919191919,
    0x1908081919192b08, 0x19080819192b0819, 0x19080819192b1908, 0x190808192b080808,
    0x190808192b08082b, 0x190808192b081919, 0x190808192b082b08, 0x190808192b190819,
    0x190808192b191908, 0x190808192b2b0808, 0x1908082b08080819, 0x1908082b08081908,
    0x1908082b08190808, 0x1908082b0819082b, 0x1908082b08191919, 0x1908082b08192b08,
    0x1908082b082b1908, 0x1908082b19080808, 0x1908082b19081919, 0x1908082b19082b08,
    0x1908082b19190819, 0x1908082b19191908, 0x1908082b192b0808, 0x1908082b2b080819,
    0x1908082b2b081908, 0x1908190808080808, 0x190819080808082b, 0x1908190808081919,
    0x1908190808082b08, 0x1908190808082b2b, 0x1908190808190819, 0x1908190808191908,
    0x190819080819192b, 0x1908190808192b19, 0x19081908082b0808, 0x19081908082b082b,
    0x19081908082b1919, 0x19081908082b2b08, 0x1908190819080819, 0x1908190819081908,
    0x190819081908192b, 0x1908190819082b19, 0x1908190819190808, 0x190819081919082b,
    0x1908190819191919, 0x1908190819192b08, 0x19081908192b0819, 0x19081908192b1908,
    0x190819082b080808, 0x190819082b08082b, 0x190819082b081919, 0x190819082b082b08,
    0x190819082b190819, 0x190819082b191908, 0x190819082b2b0808, 0x1908191908080819,
    0x1908191908081908, 0x190819190808192b, 0x1908191908082b19, 0x1908191908190808,
    0x190819190819082b, 0x1908191908191919, 0x1908191908192b08, 0x19081919082b0819,
    0x19081919082b1908, 0x1908191919080808, 0x190819191908082b, 0x1908191919081919,
    0x1908191919082b08, 0x1908191919190819, 0x1908191919191908, 0x19081919192b0808,
    0x19081919192b2b2b, 0x190819192b080819, 0x190819192b081908, 0x190819192b190808,
    0x1908192b08080808, 0x1908192b0808082b, 0x1908192b08081919, 0x1908192b08082b08,
    0x1908192b08190819, 0x1908192b08191908, 0x1908192b082b0808, 0x1908192b19080819,
    0x1908192b19081908, 0x1908192b19190808, 0x1908192b2b080808, 0x1908192b2b2b1919,
    0x19082b0808080819, 0x19082b0808081908, 0x19082b0808082b19, 0x19082b0808190808,
    0x19082b080819082b, 0x19082b0808191919, 0x19082b0808192b08, 0x19082b08082b0819,
    0x19082b08082b1908, 0x19082b0819080808, 0x19082b081908082b, 0x19082b0819081919,
    0x19082b0819082b08, 0x19082b0819190819, 0x19082b0819191908, 0x19082b08192b0808,
    0x19082b082b081908, 0x19082b082b190808, 0x19082b1908080808, 0x19082b190808082b,
    0x19082b1908081919, 0x19082b1908082b08, 0x19082b1908190819, 0x19082b1908191908,
    0x19082b19082b0808, 0x19082b1919080819, 0x19082b1919081908, 0x19082b1919190808,
    0x19082b192b080808, 0x19082b192b19192b, 0x19082b2b08080819, 0x19082b2b08081908,
    0x19082b2b08190808, 0x19082b2b19080808, 0x1919080808080808, 0x191908080808082b,
    0x1919080808081919, 0x1919080808082b08, 0x1919080808190819, 0x1919080808191908,
    0x191908080819192b, 0x1919080808192b19, 0x19190808082b0808, 0x19190808082b082b,
    0x19190808082b1919, 0x19190808082b2b08, 0x1919080819080819, 0x1919080819081908,
    0x191908081908192b, 0x1919080819082b19, 0x1919080819190808, 0x191908081919082b,
    0x1919080819191919, 0x1919080819192b08, 0x19190808192b0819, 0x19190808192b1908,
    0x191908082b080808, 0x191908082b08082b, 0x191908082b081919, 0x191908082b082b08,
    0x191908082b190819, 0x191908082b191908, 0x1919081908080819, 0x1919081908081908,
    0x191908190808192b, 0x1919081908082b19, 0x1919081908190808, 0x191908190819082b,
    0x1919081908191919, 0x1919081908192b08, 0x19190819082b0819, 0x19190819082b1908,
    0x1919081919080808, 0x191908191908082b, 0x1919081919081919, 0x1919081919082b08,
    0x1919081919190819, 0x1919081919191908, 0x19190819192b0808, 0x191908192b080819,
    0x191908192b081908, 0x191908192b190808, 0x1919082b08080808, 0x1919082b08081919,
    0x1919082b08082b08, 0x1919082b08190819, 0x1919082b08191908, 0x1919082b082b0808,
    0x1919082b19080819, 0x1919082b19081908, 0x1919082b19190808, 0x1919082b192b2b19,
    0x1919082b2b080808, 0x1919190808080819, 0x1919190808081908, 0x191919080808192b,
    0x1919190808082b19, 0x1919190808190808, 0x191919080819082b, 0x1919190808191919,
    0x1919190808192b08, 0x19191908082b0819, 0x19191908082b1908, 0x1919190819080808,
    0x191919081908082b, 0x1919190819081919, 0x1919190819082b08, 0x1919190819190819,
    0x1919190819191908, 0x19191908192b0808, 0x191919082b080819, 0x191919082b081908,
    0x191919082b190808, 0x1919191908080808, 0x191919190808082b, 0x1919191908081919,
    0x1919191908082b08, 0x1919191908190819, 0x1919191908191908, 0x19191919082b0808,
    0x1919191919080819, 0x1919191919081908, 0x1919191919190808, 0x191919192b080808,
    0x1919192b08080819, 0x1919192b08081908, 0x1919192b08190808, 0x1919192b082b192b,
    0x1919192b19080808, 0x19192b0808080808, 0x19192b080808082b, 0x19192b0808081919,
    0x19192b0808082b08, 0x19192b0808190819, 0x19192b0808191908, 0x19192b08082b0808,
    0x19192b0819080819, 0x19192b0819081908, 0x19192b0819190808, 0x19192b0819192b2b,
    0x19192b082b080808, 0x19192b1908080819, 0x19192b1908081908, 0x19192b1908190808,
    0x19192b1919080808, 0x19192b2b08080808, 0x19192b2b08192b19, 0x19192b2b2b081919,
    0x19192b2b2b2b2b08, 0x192b080808080819, 0x192b080808081908, 0x192b08080808192b,
    0x192b080808190808, 0x192b08080819082b, 0x192b080808191919, 0x192b080808192b08,
    0x192b0808082b0819, 0x192b0808082b1908, 0x192b080819080808, 0x192b080819081919,
    0x192b080819082b08, 0x192b080819190819, 0x192b080819191908, 0x192b0808192b0808,
    0x192b08082b081908, 0x192b08082b190808, 0x192b081908080808, 0x192b08190808082b,
    0x192b081908081919, 0x192b081908082b08, 0x192b081908190819, 0x192b081908191908,
    0x192b0819082b0808, 0x192b081919080819, 0x192b081919081908, 0x192b081919190808,
    0x192b08192b080808, 0x192b08192b192b19, 0x192b082b08081908, 0x192b082b08190808,
    0x192b082b19080808, 0x192b082b1919192b, 0x192b082b2b2b0819, 0x192b190808080808,
    0x192b190808081919, 0x192b190808082b08, 0x192b190808190819, 0x192b190808191908,
    0x192b1908082b0808, 0x192b190819080819, 0x192b190819081908, 0x192b190819190808,
    0x192b19082b080808, 0x192b191908080819, 0x192b191908081908, 0x192b191908190808,
    0x192b191919080808, 0x192b191919082b2b, 0x192b1919192b2b08, 0x192b19192b19082b,
    0x192b192b08080808, 0x192b192b2b191908, 0x192b2b0808080819, 0x192b2b0808081908,
    0x192b2b0808190808, 0x192b2b08192b1919, 0x192b2b082b192b08, 0x192b2b1908080808,
    0x192b2b19082b2b2b, 0x192b2b2b1908082b, 0x192b2b2b2b2b0819, 0x2b08080808080808,
    0x2b0808080808082b, 0x2b08080808081919, 0x2b08080808082b08, 0x2b08080808190819,
    0x2b08080808191908, 0x2b08080808192b19, 0x2b080808082b0808, 0x2b080808082b1919,
    0x2b08080819080819, 0x2b08080819081908, 0x2b08080819190808, 0x2b0808081919082b,
    0x2b08080819191919, 0x2b08080819192b08, 0x2b080808192b0819, 0x2b0808082b080808,
    0x2b0808082b081919, 0x2b0808082b190819, 0x2b0808082b191908, 0x2b08081908080819,
    0x2b08081908081908, 0x2b08081908082b19, 0x2b08081908190808, 0x2b0808190819082b,
    0x2b08081908191919, 0x2b08081908192b08, 0x2b080819082b0819, 0x2b080819082b1908,
    0x2b08081919080808, 0x2b0808191908082b, 0x2b08081919081919, 0x2b08081919082b08,
    0x2b08081919190819, 0x2b08081919191908, 0x2b0808192b080819, 0x2b0808192b081908,
    0x2b0808192b190808, 0x2b0808192b2b2b19, 0x2b08082b08080808, 0x2b08082b08081919,
    0x2b08082b08082b2b, 0x2b08082b08190819, 0x2b08082b08191908, 0x2b08082b19080819,
    0x2b08082b19081908, 0x2b08082b19190808, 0x2b08190808080819, 0x2b08190808081908,
    0x2b0819080808192b, 0x2b08190808082b19, 0x2b08190808190808, 0x2b0819080819082b,
    0x2b08190808191919, 0x2b08190808192b08, 0x2b081908082b0819, 0x2b08190819080808,
    0x2b0819081908082b, 0x2b08190819081919, 0x2b08190819082b08, 0x2b08190819190819,
    0x2b08190819191908, 0x2b081908192b0808, 0x2b0819082b080819, 0x2b0819082b081908,
    0x2b0819082b190808, 0x2b08191908080808, 0x2b0819190808082b, 0x2b08191908081919,
    0x2b08191908082b08, 0x2b08191908190819, 0x2b08191908191908, 0x2b081919082b0808,
    0x2b08191919080819, 0x2b08191919081908, 0x2b08191919190808, 0x2b0819192b080808,
    0x2b0819192b082b2b, 0x2b08192b08080819, 0x2b08192b08081908, 0x2b08192b08190808,
    0x2b08192b082b2b19, 0x2b08192b19080808, 0x2b082b0808080808, 0x2b082b0808081919,
    0x2b082b0808190819, 0x2b082b0808191908, 0x2b082b0819080819, 0x2b082b0819081908,
    0x2b082b0819190808, 0x2b082b082b2b082b, 0x2b082b1908080819, 0x2b082b1908081908,
    0x2b082b1919080808, 0x2b082b19192b1919, 0x2b082b2b082b082b, 0x2b082b2b19192b08,
    0x2b082b2b19192b2b, 0x2b082b2b2b08082b, 0x2b082b2b2b2b082b, 0x2b19080808080819,
    0x2b19080808081908, 0x2b19080808082b19, 0x2b19080808190808, 0x2b1908080819082b,
    0x2b19080808191919, 0x2b19080808192b08, 0x2b190808082b1908, 0x2b19080819080808,
    0x2b1908081908082b, 0x2b19080819081919, 0x2b19080819082b08, 0x2b19080819190819,
    0x2b19080819191908, 0x2b190808192b0808, 0x2b1908082b080819, 0x2b1908082b081908,
    0x2b1908082b190808, 0x2b19081908080808, 0x2b19081908081919, 0x2b19081908190819,
    0x2b19081908191908, 0x2b19081919080819, 0x2b19081919081908, 0x2b19081919190808,
    0x2b19081919192b2b, 0x2b19082b08080819, 0x2b19082b08081908, 0x2b19082b08190808,
    0x2b19082b19080808, 0x2b19082b2b2b192b, 0x2b19190808080808, 0x2b1919080808082b,
    0x2b19190808081919, 0x2b19190808082b08, 0x2b19190808190819, 0x2b19190808191908,
    0x2b191908082b0808, 0x2b19190819080819, 0x2b19190819081908, 0x2b19190819190808,
    0x2b1919082b080808, 0x2b1919082b19192b, 0x2b19191908080819, 0x2b19191908081908,
    0x2b19191908190808, 0x2b19191919080808, 0x2b1919192b192b08, 0x2b1919192b2b0819,
    0x2b19192b08080808, 0x2b19192b1908192b, 0x2b19192b192b1908, 0x2b192b0808080819,
    0x2b192b0808081908, 0x2b192b0808190808, 0x2b192b08082b192b, 0x2b192b0819080808,
    0x2b192b082b2b2b19, 0x2b192b1908080808, 0x2b192b1919082b19, 0x2b192b191919082b,
    0x2b192b2b2b190808, 0x2b2b080808080808, 0x2b2b080808081919, 0x2b2b080808082b2b,
    0x2b2b080808191908, 0x2b2b0808082b082b, 0x2b2b0808082b2b2b, 0x2b2b080819080819,
    0x2b2b080819081908, 0x2b2b080819190808, 0x2b2b08082b2b082b, 0x2b2b08082b2b2b2b,
    0x2b2b081919080808, 0x2b2b0819192b1919, 0x2b2b082b0808082b, 0x2b2b082b08082b2b,
    0x2b2b082b082b082b, 0x2b2b082b082b2b08, 0x2b2b082b082b2b2b, 0x2b2b082b2b08082b,
    0x2b2b082b2b082b08, 0x2b2b082b2b082b2b, 0x2b2b082b2b2b2b08, 0x2b2b190808080819,
    0x2b2b190808081908, 0x2b2b190808190808, 0x2b2b190819080808, 0x2b2b19082b082b19,
    0x2b2b19082b2b1908, 0x2b2b191908080808, 0x2b2b191908192b19, 0x2b2b192b19190819,
    0x2b2b2b0808082b2b, 0x2b2b2b08082b2b08, 0x2b2b2b082b2b082b, 0x2b2b2b1919191908,
    0x2b2b2b192b08192b, 0x2b2b2b2b08082b08, 0x2b2b2b2b08082b2b, 0x2b2b2b2b082b0808,
    0x2b2b2b2b082b082b, 0x2b2b2b2b082b2b08, 0x2b2b2b2b2b082b08, 0x2b2b2b2b2b2b2b2b,
};
- static const __device__ uint32_t iq3xxs_grid[256] = {
- 0x04040404, 0x04040414, 0x04040424, 0x04040c0c, 0x04040c1c, 0x04040c3e, 0x04041404, 0x04041414,
- 0x04041c0c, 0x04042414, 0x04043e1c, 0x04043e2c, 0x040c040c, 0x040c041c, 0x040c0c04, 0x040c0c14,
- 0x040c140c, 0x040c142c, 0x040c1c04, 0x040c1c14, 0x040c240c, 0x040c2c24, 0x040c3e04, 0x04140404,
- 0x04140414, 0x04140424, 0x04140c0c, 0x04141404, 0x04141414, 0x04141c0c, 0x04141c1c, 0x04141c3e,
- 0x04142c0c, 0x04142c3e, 0x04143e2c, 0x041c040c, 0x041c043e, 0x041c0c04, 0x041c0c14, 0x041c142c,
- 0x041c3e04, 0x04240c1c, 0x04241c3e, 0x04242424, 0x04242c3e, 0x04243e1c, 0x04243e2c, 0x042c040c,
- 0x042c043e, 0x042c1c14, 0x042c2c14, 0x04341c2c, 0x04343424, 0x043e0c04, 0x043e0c24, 0x043e0c34,
- 0x043e241c, 0x043e340c, 0x0c04040c, 0x0c04041c, 0x0c040c04, 0x0c040c14, 0x0c04140c, 0x0c04141c,
- 0x0c041c04, 0x0c041c14, 0x0c041c24, 0x0c04243e, 0x0c042c04, 0x0c0c0404, 0x0c0c0414, 0x0c0c0c0c,
- 0x0c0c1404, 0x0c0c1414, 0x0c14040c, 0x0c14041c, 0x0c140c04, 0x0c140c14, 0x0c14140c, 0x0c141c04,
- 0x0c143e14, 0x0c1c0404, 0x0c1c0414, 0x0c1c1404, 0x0c1c1c0c, 0x0c1c2434, 0x0c1c3434, 0x0c24040c,
- 0x0c24042c, 0x0c242c04, 0x0c2c1404, 0x0c2c1424, 0x0c2c2434, 0x0c2c3e0c, 0x0c34042c, 0x0c3e1414,
- 0x0c3e2404, 0x14040404, 0x14040414, 0x14040c0c, 0x14040c1c, 0x14041404, 0x14041414, 0x14041434,
- 0x14041c0c, 0x14042414, 0x140c040c, 0x140c041c, 0x140c042c, 0x140c0c04, 0x140c0c14, 0x140c140c,
- 0x140c1c04, 0x140c341c, 0x140c343e, 0x140c3e04, 0x14140404, 0x14140414, 0x14140c0c, 0x14140c3e,
- 0x14141404, 0x14141414, 0x14141c3e, 0x14142404, 0x14142c2c, 0x141c040c, 0x141c0c04, 0x141c0c24,
- 0x141c3e04, 0x141c3e24, 0x14241c2c, 0x14242c1c, 0x142c041c, 0x142c143e, 0x142c240c, 0x142c3e24,
- 0x143e040c, 0x143e041c, 0x143e0c34, 0x143e242c, 0x1c04040c, 0x1c040c04, 0x1c040c14, 0x1c04140c,
- 0x1c04141c, 0x1c042c04, 0x1c04342c, 0x1c043e14, 0x1c0c0404, 0x1c0c0414, 0x1c0c1404, 0x1c0c1c0c,
- 0x1c0c2424, 0x1c0c2434, 0x1c14040c, 0x1c14041c, 0x1c140c04, 0x1c14142c, 0x1c142c14, 0x1c143e14,
- 0x1c1c0c0c, 0x1c1c1c1c, 0x1c241c04, 0x1c24243e, 0x1c243e14, 0x1c2c0404, 0x1c2c0434, 0x1c2c1414,
- 0x1c2c2c2c, 0x1c340c24, 0x1c341c34, 0x1c34341c, 0x1c3e1c1c, 0x1c3e3404, 0x24040424, 0x24040c3e,
- 0x24041c2c, 0x24041c3e, 0x24042c1c, 0x24042c3e, 0x240c3e24, 0x24141404, 0x24141c3e, 0x24142404,
- 0x24143404, 0x24143434, 0x241c043e, 0x241c242c, 0x24240424, 0x24242c0c, 0x24243424, 0x242c142c,
- 0x242c241c, 0x242c3e04, 0x243e042c, 0x243e0c04, 0x243e0c14, 0x243e1c04, 0x2c040c14, 0x2c04240c,
- 0x2c043e04, 0x2c0c0404, 0x2c0c0434, 0x2c0c1434, 0x2c0c2c2c, 0x2c140c24, 0x2c141c14, 0x2c143e14,
- 0x2c1c0414, 0x2c1c2c1c, 0x2c240c04, 0x2c24141c, 0x2c24143e, 0x2c243e14, 0x2c2c0414, 0x2c2c1c0c,
- 0x2c342c04, 0x2c3e1424, 0x2c3e2414, 0x34041424, 0x34042424, 0x34042434, 0x34043424, 0x340c140c,
- 0x340c340c, 0x34140c3e, 0x34143424, 0x341c1c04, 0x341c1c34, 0x34242424, 0x342c042c, 0x342c2c14,
- 0x34341c1c, 0x343e041c, 0x343e140c, 0x3e04041c, 0x3e04042c, 0x3e04043e, 0x3e040c04, 0x3e041c14,
- 0x3e042c14, 0x3e0c1434, 0x3e0c2404, 0x3e140c14, 0x3e14242c, 0x3e142c14, 0x3e1c0404, 0x3e1c0c2c,
- 0x3e1c1c1c, 0x3e1c3404, 0x3e24140c, 0x3e24240c, 0x3e2c0404, 0x3e2c0414, 0x3e2c1424, 0x3e341c04,
- };
- static const __device__ uint32_t iq3xs_grid[512] = {
- 0x04040404, 0x0404040c, 0x04040414, 0x0404042c, 0x0404043e, 0x04040c04, 0x04040c0c, 0x04040c14,
- 0x04040c24, 0x04040c34, 0x04041404, 0x0404140c, 0x0404142c, 0x04041c1c, 0x04042404, 0x04042414,
- 0x0404242c, 0x0404243e, 0x04042c0c, 0x04042c1c, 0x04043404, 0x04043414, 0x04043e0c, 0x04043e24,
- 0x04043e3e, 0x040c0404, 0x040c040c, 0x040c0414, 0x040c0424, 0x040c0c04, 0x040c0c0c, 0x040c0c2c,
- 0x040c1404, 0x040c141c, 0x040c143e, 0x040c1c0c, 0x040c1c2c, 0x040c2424, 0x040c340c, 0x040c342c,
- 0x040c3e14, 0x04140404, 0x0414040c, 0x0414042c, 0x0414043e, 0x04140c04, 0x04140c1c, 0x04140c34,
- 0x0414140c, 0x0414142c, 0x04141c04, 0x04141c24, 0x04142414, 0x0414242c, 0x0414243e, 0x04142c0c,
- 0x04142c1c, 0x04143e04, 0x04143e1c, 0x041c041c, 0x041c0c0c, 0x041c0c2c, 0x041c1404, 0x041c1414,
- 0x041c1c0c, 0x041c1c1c, 0x041c1c34, 0x041c2424, 0x041c2c04, 0x041c2c14, 0x041c343e, 0x041c3e0c,
- 0x041c3e2c, 0x04240404, 0x04240c1c, 0x04240c3e, 0x0424140c, 0x04241424, 0x04241c14, 0x04242404,
- 0x0424241c, 0x04242c0c, 0x04243e04, 0x042c0414, 0x042c0424, 0x042c1404, 0x042c1414, 0x042c1434,
- 0x042c1c1c, 0x042c240c, 0x042c242c, 0x042c243e, 0x042c3434, 0x042c3e1c, 0x04340434, 0x04340c0c,
- 0x04340c1c, 0x04341c0c, 0x04342c14, 0x04343e0c, 0x043e0404, 0x043e0414, 0x043e0424, 0x043e1404,
- 0x043e1414, 0x043e1434, 0x043e1c1c, 0x043e2c04, 0x043e2c24, 0x0c040404, 0x0c04040c, 0x0c040414,
- 0x0c040424, 0x0c040c04, 0x0c040c0c, 0x0c040c1c, 0x0c040c2c, 0x0c040c3e, 0x0c041404, 0x0c041414,
- 0x0c041c0c, 0x0c041c24, 0x0c041c34, 0x0c042c24, 0x0c042c34, 0x0c04340c, 0x0c043e14, 0x0c0c0404,
- 0x0c0c040c, 0x0c0c041c, 0x0c0c0434, 0x0c0c0c04, 0x0c0c0c24, 0x0c0c140c, 0x0c0c1c04, 0x0c0c1c1c,
- 0x0c0c240c, 0x0c0c2c04, 0x0c0c2c14, 0x0c0c3e04, 0x0c0c3e34, 0x0c140404, 0x0c140c14, 0x0c140c2c,
- 0x0c140c3e, 0x0c141404, 0x0c141424, 0x0c141c14, 0x0c142404, 0x0c14241c, 0x0c142c2c, 0x0c143404,
- 0x0c143e14, 0x0c1c040c, 0x0c1c0424, 0x0c1c043e, 0x0c1c0c04, 0x0c1c0c1c, 0x0c1c140c, 0x0c1c143e,
- 0x0c1c1c04, 0x0c1c1c24, 0x0c1c240c, 0x0c1c3414, 0x0c1c3e04, 0x0c24041c, 0x0c24042c, 0x0c240c14,
- 0x0c240c24, 0x0c241c0c, 0x0c241c1c, 0x0c242414, 0x0c242434, 0x0c242c04, 0x0c242c24, 0x0c2c040c,
- 0x0c2c0c04, 0x0c2c0c1c, 0x0c2c140c, 0x0c2c1c04, 0x0c2c1c14, 0x0c2c2c0c, 0x0c341404, 0x0c341424,
- 0x0c34143e, 0x0c342424, 0x0c342434, 0x0c3e040c, 0x0c3e041c, 0x0c3e0c04, 0x0c3e0c14, 0x0c3e140c,
- 0x0c3e1c2c, 0x0c3e240c, 0x0c3e3414, 0x0c3e3e04, 0x14040404, 0x1404040c, 0x1404041c, 0x1404042c,
- 0x1404043e, 0x14040c04, 0x14040c14, 0x14040c24, 0x14040c34, 0x1404140c, 0x1404141c, 0x1404143e,
- 0x14041c04, 0x14041c14, 0x1404240c, 0x1404241c, 0x1404242c, 0x14042c04, 0x14042c14, 0x1404343e,
- 0x14043e04, 0x14043e1c, 0x14043e2c, 0x140c0404, 0x140c0414, 0x140c0c04, 0x140c0c1c, 0x140c0c3e,
- 0x140c1414, 0x140c142c, 0x140c1c0c, 0x140c1c24, 0x140c2414, 0x140c2c0c, 0x1414040c, 0x14140424,
- 0x1414043e, 0x1414140c, 0x1414141c, 0x14141c04, 0x14141c3e, 0x1414240c, 0x14142c1c, 0x14142c3e,
- 0x14143e0c, 0x14143e24, 0x141c0404, 0x141c0414, 0x141c042c, 0x141c0c0c, 0x141c1414, 0x141c1424,
- 0x141c1c0c, 0x141c1c1c, 0x141c2414, 0x141c2c04, 0x141c3434, 0x1424040c, 0x1424043e, 0x14241404,
- 0x1424141c, 0x14241c14, 0x14241c2c, 0x1424240c, 0x14243e14, 0x14243e2c, 0x142c0424, 0x142c0c0c,
- 0x142c1414, 0x142c1c3e, 0x142c2404, 0x142c2c1c, 0x142c3e04, 0x14340404, 0x14340414, 0x1434043e,
- 0x1434140c, 0x14342c2c, 0x1434340c, 0x143e042c, 0x143e0c0c, 0x143e1434, 0x143e1c04, 0x143e241c,
- 0x143e2c04, 0x1c040414, 0x1c040c0c, 0x1c040c1c, 0x1c040c2c, 0x1c040c3e, 0x1c041414, 0x1c041c0c,
- 0x1c041c1c, 0x1c041c2c, 0x1c042414, 0x1c042424, 0x1c04243e, 0x1c042c0c, 0x1c04341c, 0x1c043e0c,
- 0x1c0c040c, 0x1c0c041c, 0x1c0c042c, 0x1c0c0c24, 0x1c0c140c, 0x1c0c141c, 0x1c0c2404, 0x1c0c3404,
- 0x1c0c3e14, 0x1c0c3e34, 0x1c140404, 0x1c140c14, 0x1c141404, 0x1c141c14, 0x1c141c24, 0x1c142c04,
- 0x1c1c040c, 0x1c1c0c04, 0x1c1c0c24, 0x1c1c140c, 0x1c1c141c, 0x1c1c143e, 0x1c1c1c04, 0x1c1c240c,
- 0x1c1c241c, 0x1c1c243e, 0x1c1c2c2c, 0x1c1c3e1c, 0x1c24041c, 0x1c240c0c, 0x1c240c34, 0x1c241414,
- 0x1c241c0c, 0x1c242c14, 0x1c243404, 0x1c243424, 0x1c2c040c, 0x1c2c0c04, 0x1c2c0c14, 0x1c2c142c,
- 0x1c2c1c14, 0x1c2c2424, 0x1c2c2c34, 0x1c2c3e1c, 0x1c340c34, 0x1c34240c, 0x1c3e040c, 0x1c3e041c,
- 0x1c3e1404, 0x1c3e1414, 0x1c3e1c2c, 0x24040404, 0x24040424, 0x24040c14, 0x24041404, 0x24041424,
- 0x2404143e, 0x24041c14, 0x2404240c, 0x24042c04, 0x24043e04, 0x240c0414, 0x240c043e, 0x240c0c0c,
- 0x240c0c1c, 0x240c1414, 0x240c1c04, 0x240c1c2c, 0x240c241c, 0x240c2c0c, 0x240c2c2c, 0x2414040c,
- 0x2414041c, 0x24140c04, 0x24140c2c, 0x2414140c, 0x24141c1c, 0x24142404, 0x24142c3e, 0x24143414,
- 0x24143e04, 0x241c0424, 0x241c0c0c, 0x241c0c1c, 0x241c1404, 0x241c1414, 0x241c1c0c, 0x241c1c2c,
- 0x24240404, 0x24240414, 0x24241424, 0x24241c3e, 0x24242404, 0x24243e0c, 0x242c042c, 0x242c043e,
- 0x242c140c, 0x242c3414, 0x24340c1c, 0x24341c24, 0x24343404, 0x243e0c04, 0x243e0c2c, 0x243e1c04,
- 0x243e241c, 0x243e2c0c, 0x2c040414, 0x2c040c04, 0x2c040c24, 0x2c041414, 0x2c042404, 0x2c042424,
- 0x2c04243e, 0x2c042c14, 0x2c043434, 0x2c043e24, 0x2c0c040c, 0x2c0c041c, 0x2c0c042c, 0x2c0c0c14,
- 0x2c0c140c, 0x2c0c1c14, 0x2c0c3e14, 0x2c140404, 0x2c140c0c, 0x2c14141c, 0x2c141c04, 0x2c141c34,
- 0x2c142c1c, 0x2c1c0414, 0x2c1c043e, 0x2c1c0c04, 0x2c1c143e, 0x2c1c2424, 0x2c1c2c0c, 0x2c1c342c,
- 0x2c1c3e1c, 0x2c24040c, 0x2c240424, 0x2c241404, 0x2c241c14, 0x2c242434, 0x2c2c0c14, 0x2c2c1434,
- 0x2c2c2c0c, 0x2c2c2c1c, 0x2c342414, 0x2c3e0414, 0x2c3e0424, 0x2c3e1414, 0x34040c0c, 0x34040c1c,
- 0x34040c2c, 0x34041c0c, 0x34041c1c, 0x34043404, 0x340c0404, 0x340c1404, 0x340c143e, 0x340c3424,
- 0x34140c14, 0x34141c24, 0x34142414, 0x34142c2c, 0x34143414, 0x34143e04, 0x341c0404, 0x341c0c24,
- 0x341c140c, 0x341c2404, 0x3424142c, 0x3424241c, 0x34243414, 0x342c0404, 0x342c041c, 0x342c1c24,
- 0x342c3404, 0x3434042c, 0x34342404, 0x343e0c0c, 0x343e0c1c, 0x3e040404, 0x3e040424, 0x3e04043e,
- 0x3e041404, 0x3e041414, 0x3e041c34, 0x3e042404, 0x3e042c24, 0x3e043414, 0x3e0c0414, 0x3e0c0c0c,
- 0x3e0c1424, 0x3e0c241c, 0x3e0c242c, 0x3e14040c, 0x3e140424, 0x3e140c04, 0x3e140c34, 0x3e14140c,
- 0x3e141c04, 0x3e142c0c, 0x3e1c0414, 0x3e1c1c14, 0x3e1c1c2c, 0x3e1c2c1c, 0x3e24040c, 0x3e24042c,
- 0x3e240c1c, 0x3e241404, 0x3e242c04, 0x3e2c1414, 0x3e2c2414, 0x3e340414, 0x3e341c0c, 0x3e3e0404,
- };
- static const __device__ uint64_t iq1s_grid[512] = {
- 0xffffffffffff0101, 0xffffffffff01ff00, 0xffffffffff010100, 0xffffffff00000000,
- 0xffffffff01ff00ff, 0xffffffff01ff0001, 0xffffffff0101ffff, 0xffffffff0101ff01,
- 0xffffff00ff000000, 0xffffff000000ff00, 0xffffff00000000ff, 0xffffff0000000100,
- 0xffffff0000010000, 0xffffff0001000000, 0xffffff01ffff00ff, 0xffffff01ff01ff00,
- 0xffffff01ff010100, 0xffffff0100000001, 0xffffff0101ffff00, 0xffffff0101ff0101,
- 0xffffff0101010100, 0xffff00ffff00ff01, 0xffff00ffff0000ff, 0xffff00ff00ff0100,
- 0xffff00ff0100ff00, 0xffff00ff010001ff, 0xffff0000ff0101ff, 0xffff000000ffff00,
- 0xffff000000000000, 0xffff00000001ff01, 0xffff000001000101, 0xffff0000010100ff,
- 0xffff0001ffff0100, 0xffff00010000ff00, 0xffff000100010101, 0xffff000101000000,
- 0xffff01ffffff0000, 0xffff01ffff01ffff, 0xffff01ffff010100, 0xffff01ff00000000,
- 0xffff01ff01ffffff, 0xffff01ff01ff0001, 0xffff01ff0101ffff, 0xffff01ff01010001,
- 0xffff0100ffffff01, 0xffff01000000ffff, 0xffff010000000100, 0xffff010001ff01ff,
- 0xffff010001000000, 0xffff0101ff000000, 0xffff0101000101ff, 0xffff010101ffff01,
- 0xffff01010101ff00, 0xff00ffffff000000, 0xff00ffff00ffff00, 0xff00ffff00000001,
- 0xff00ffff000001ff, 0xff00ffff01010000, 0xff00ff00ffff0000, 0xff00ff00ff00ff00,
- 0xff00ff00ff0000ff, 0xff00ff00ff000100, 0xff00ff00ff010001, 0xff00ff0000ff0001,
- 0xff00ff000000ffff, 0xff00ff0000000000, 0xff00ff000001ff00, 0xff00ff0000010100,
- 0xff00ff0001ff0000, 0xff00ff000100ff00, 0xff00ff0001000100, 0xff00ff01ff000000,
- 0xff00ff0100ff0000, 0xff00ff01000001ff, 0xff00ff0101010001, 0xff0000ff00000000,
- 0xff0000ff0001ff00, 0xff0000ff00010100, 0xff000000ffff0101, 0xff000000ff000000,
- 0xff000000ff01ff00, 0xff00000000ff0000, 0xff0000000000ff00, 0xff000000000000ff,
- 0xff00000000000000, 0xff00000000000001, 0xff00000000000100, 0xff0000000001ffff,
- 0xff00000000010000, 0xff00000001000000, 0xff00000001010100, 0xff000001ff00ff01,
- 0xff000001ff0100ff, 0xff00000100000000, 0xff0000010001ff00, 0xff00000101ff0100,
- 0xff0000010100ff00, 0xff0001ff00ff00ff, 0xff0001ff00000101, 0xff0001ff000100ff,
- 0xff0001ff01000000, 0xff000100ff0001ff, 0xff0001000000ff01, 0xff00010000000000,
- 0xff00010000010001, 0xff00010000010100, 0xff00010001ffff00, 0xff00010001ff0101,
- 0xff00010001010000, 0xff000101ffffffff, 0xff000101ff000101, 0xff00010101ff00ff,
- 0xff00010101000001, 0xff000101010100ff, 0xff01ffffff000101, 0xff01ffffff01ffff,
- 0xff01ffffff01ff01, 0xff01ffffff0101ff, 0xff01ffff00000000, 0xff01ffff01ff0001,
- 0xff01ffff0101ff01, 0xff01ff00ff000000, 0xff01ff0000ff0100, 0xff01ff000000ff01,
- 0xff01ff0000010000, 0xff01ff00010000ff, 0xff01ff01ff01ff00, 0xff01ff0100000101,
- 0xff0100ffffff0000, 0xff0100ffff010000, 0xff0100ff01ff00ff, 0xff0100ff01000100,
- 0xff0100ff010100ff, 0xff010000ffffff01, 0xff01000000000000, 0xff0100000101ff00,
- 0xff010001ffff00ff, 0xff010001ff000100, 0xff01000100ffff00, 0xff01000100010001,
- 0xff01000101ff0001, 0xff010001010001ff, 0xff0101ffffffffff, 0xff0101ffff01ffff,
- 0xff0101ffff010101, 0xff0101ff0000ff00, 0xff0101ff01010001, 0xff010100ff000000,
- 0xff010100ff01ff01, 0xff01010000ff0001, 0xff01010000000100, 0xff01010001000000,
- 0xff0101010100ffff, 0x00ffffff0000ff01, 0x00ffffff000000ff, 0x00ffffff00000100,
- 0x00ffffff00010000, 0x00ffff00ffff0001, 0x00ffff00ff0000ff, 0x00ffff00ff000100,
- 0x00ffff0000000000, 0x00ffff0001000100, 0x00ffff0001010001, 0x00ffff01ff00ff01,
- 0x00ffff0100ff0100, 0x00ffff010000ff00, 0x00ffff01000100ff, 0x00ffff0101ff00ff,
- 0x00ffff010101ff00, 0x00ff00ffffffffff, 0x00ff00ffffff01ff, 0x00ff00ffff000101,
- 0x00ff00ff00000000, 0x00ff00ff000101ff, 0x00ff00ff01010101, 0x00ff0000ff000000,
- 0x00ff0000ff01ffff, 0x00ff000000ff0000, 0x00ff00000000ff00, 0x00ff0000000000ff,
- 0x00ff000000000000, 0x00ff000000000001, 0x00ff000000000100, 0x00ff000000010000,
- 0x00ff000001ffff01, 0x00ff000001000000, 0x00ff0001ff000101, 0x00ff000100ffffff,
- 0x00ff000100000000, 0x00ff0001010001ff, 0x00ff01ffff000000, 0x00ff01ff0001ff00,
- 0x00ff01ff01ff0100, 0x00ff0100ff01ff01, 0x00ff010000ff00ff, 0x00ff010000ff0101,
- 0x00ff010000000000, 0x00ff010000010101, 0x00ff01000100ff00, 0x00ff010001010000,
- 0x00ff0101ffffff00, 0x00ff01010000ff01, 0x00ff010100000100, 0x00ff010101ff0000,
- 0x0000ffffffff0100, 0x0000ffffff00ff00, 0x0000ffffff0000ff, 0x0000ffffff010000,
- 0x0000ffff00000000, 0x0000ffff00010101, 0x0000ffff01ffff01, 0x0000ffff01000100,
- 0x0000ff00ff000000, 0x0000ff00ff01ff00, 0x0000ff00ff0101ff, 0x0000ff0000ff0000,
- 0x0000ff000000ff00, 0x0000ff00000000ff, 0x0000ff0000000000, 0x0000ff0000000001,
- 0x0000ff0000000100, 0x0000ff0000010000, 0x0000ff0001ffffff, 0x0000ff0001ff01ff,
- 0x0000ff0001000000, 0x0000ff000101ffff, 0x0000ff01ffff0101, 0x0000ff01ff010000,
- 0x0000ff0100000000, 0x0000ff0101000101, 0x000000ffffff0001, 0x000000ffff000000,
- 0x000000ff00ff0000, 0x000000ff0000ff00, 0x000000ff000000ff, 0x000000ff00000000,
- 0x000000ff00000001, 0x000000ff00000100, 0x000000ff00010000, 0x000000ff01000000,
- 0x000000ff0101ff00, 0x00000000ffff0000, 0x00000000ff00ff00, 0x00000000ff0000ff,
- 0x00000000ff000000, 0x00000000ff000001, 0x00000000ff000100, 0x00000000ff010000,
- 0x0000000000ffff00, 0x0000000000ff00ff, 0x0000000000ff0000, 0x0000000000ff0001,
- 0x0000000000ff0100, 0x000000000000ffff, 0x000000000000ff00, 0x000000000000ff01,
- 0x00000000000000ff, 0x0000000000000001, 0x00000000000001ff, 0x0000000000000100,
- 0x0000000000000101, 0x000000000001ff00, 0x00000000000100ff, 0x0000000000010000,
- 0x0000000000010001, 0x0000000000010100, 0x0000000001ff0000, 0x000000000100ff00,
- 0x00000000010000ff, 0x0000000001000000, 0x0000000001000001, 0x0000000001000100,
- 0x0000000001010000, 0x00000001ffff01ff, 0x00000001ff000000, 0x0000000100ff0000,
- 0x000000010000ff00, 0x00000001000000ff, 0x0000000100000000, 0x0000000100000001,
- 0x0000000100000100, 0x0000000100010000, 0x0000000101000000, 0x000001ffff00ff00,
- 0x000001ffff010001, 0x000001ffff0101ff, 0x000001ff00ffff01, 0x000001ff0000ffff,
- 0x000001ff00000000, 0x000001ff010000ff, 0x000001ff01010100, 0x00000100ffff0100,
- 0x00000100ff000000, 0x0000010000ff0000, 0x000001000000ff00, 0x00000100000000ff,
- 0x0000010000000000, 0x0000010000000001, 0x0000010000000100, 0x0000010000010000,
- 0x0000010001000000, 0x000001000101ff01, 0x00000101ffff0001, 0x00000101ff01ffff,
- 0x0000010100000000, 0x0000010101010100, 0x0001ffffff000000, 0x0001ffff00ffffff,
- 0x0001ffff00000100, 0x0001ffff0001ff00, 0x0001ffff01000000, 0x0001ff00ffffff00,
- 0x0001ff00ffff01ff, 0x0001ff00ff010000, 0x0001ff0000000000, 0x0001ff0000010001,
- 0x0001ff0001ff0000, 0x0001ff0001010100, 0x0001ff01ff0000ff, 0x0001ff01ff000001,
- 0x0001ff0100ffffff, 0x0001ff010001ffff, 0x0001ff01000101ff, 0x0001ff010100ff01,
- 0x000100ffff00ffff, 0x000100ffff00ff01, 0x000100ffff000100, 0x000100ff00000000,
- 0x000100ff000101ff, 0x000100ff01ff0101, 0x000100ff0100ffff, 0x000100ff01010101,
- 0x00010000ff000000, 0x00010000ff010100, 0x0001000000ff0000, 0x000100000000ff00,
- 0x00010000000000ff, 0x0001000000000000, 0x0001000000000001, 0x0001000000000100,
- 0x0001000000010000, 0x0001000001ffff01, 0x0001000001000000, 0x0001000100ff0101,
- 0x0001000100000000, 0x00010001010100ff, 0x000101ffffff01ff, 0x000101ffffff0101,
- 0x000101ff00010000, 0x000101ff01ff0000, 0x000101ff0100ff01, 0x00010100ffff0000,
- 0x0001010000000000, 0x000101000001ffff, 0x0001010000010101, 0x00010100010001ff,
- 0x00010101ff00ff00, 0x00010101ff010001, 0x0001010100ffffff, 0x0001010100ff01ff,
- 0x00010101000101ff, 0x0001010101ff0000, 0x000101010100ff01, 0x0001010101000101,
- 0x01ffffffffff0101, 0x01ffffffff01ffff, 0x01ffffffff01ff01, 0x01ffffffff0101ff,
- 0x01ffffffff010101, 0x01ffffff00000000, 0x01ffffff01ff01ff, 0x01ffffff01000101,
- 0x01ffffff0101ff01, 0x01ffffff010100ff, 0x01ffff000000ff00, 0x01ffff0000000001,
- 0x01ffff00000001ff, 0x01ffff0000010000, 0x01ffff0001ff0000, 0x01ffff01ffffffff,
- 0x01ffff01ffff01ff, 0x01ffff01ff000000, 0x01ffff01ff01ffff, 0x01ffff01ff0101ff,
- 0x01ffff010100ffff, 0x01ff00ffffff0000, 0x01ff00ffff010000, 0x01ff00ff00ffff01,
- 0x01ff0000ff0000ff, 0x01ff000000000000, 0x01ff00000001ff01, 0x01ff000001ffffff,
- 0x01ff000001010100, 0x01ff0001ffffff01, 0x01ff0001ff010001, 0x01ff000101ff0100,
- 0x01ff000101000001, 0x01ff0001010100ff, 0x01ff01ffff00ffff, 0x01ff01ff00010001,
- 0x01ff01ff01000000, 0x01ff01ff010101ff, 0x01ff0100ff000001, 0x01ff010000ffff00,
- 0x01ff010000000100, 0x01ff010001ff01ff, 0x01ff01000101ffff, 0x01ff0101ffff00ff,
- 0x01ff0101ffff0101, 0x01ff0101ff0101ff, 0x01ff010100010000, 0x0100ffff00ff00ff,
- 0x0100ffff00ff0001, 0x0100ffff00000100, 0x0100ffff0100ff00, 0x0100ff00ffff0000,
- 0x0100ff00ff00ffff, 0x0100ff00ff00ff01, 0x0100ff00ff000100, 0x0100ff00ff010000,
- 0x0100ff0000000000, 0x0100ff00000100ff, 0x0100ff0001ff0101, 0x0100ff0001010101,
- 0x0100ff0100ff00ff, 0x0100ff0100ff0001, 0x0100ff0100000100, 0x0100ff0100010001,
- 0x0100ff0101000000, 0x010000ffff00ff00, 0x010000ff0000ffff, 0x010000ff00000000,
- 0x010000ff010001ff, 0x010000ff01010001, 0x01000000ffffff00, 0x01000000ffff0101,
- 0x01000000ff000000, 0x01000000ff0100ff, 0x01000000ff010101, 0x0100000000ff0000,
- 0x010000000000ff00, 0x01000000000000ff, 0x0100000000000000, 0x0100000000000001,
- 0x0100000000000100, 0x0100000000010000, 0x0100000001000000, 0x0100000100000000,
- 0x01000001000101ff, 0x0100000101ffff01, 0x010001ffff000101, 0x010001ff00ff0100,
- 0x010001ff0000ff00, 0x010001ff000100ff, 0x010001ff01ffffff, 0x01000100ffff0000,
- 0x01000100ff0001ff, 0x0100010000000000, 0x010001000001ff00, 0x0100010001ff0000,
- 0x01000100010000ff, 0x0100010001000101, 0x01000101ff00ff01, 0x0100010100ff0100,
- 0x010001010000ffff, 0x0100010101010001, 0x0101ffffffff0101, 0x0101ffffff0001ff,
- 0x0101ffffff01ffff, 0x0101ffffff010101, 0x0101ffff00000000, 0x0101ffff0101ffff,
- 0x0101ffff010101ff, 0x0101ff00ff000000, 0x0101ff0000ff0100, 0x0101ff000000ff00,
- 0x0101ff0000010000, 0x0101ff00010000ff, 0x0101ff0001000001, 0x0101ff01ff010101,
- 0x0101ff0100000000, 0x0101ff010101ff00, 0x010100ffffff0000, 0x010100ffff010000,
- 0x010100ff00ff01ff, 0x010100ff000000ff, 0x010100ff00000101, 0x010100ff01ffff00,
- 0x01010000ffffff01, 0x01010000ff000100, 0x01010000ff01ff01, 0x0101000000000000,
- 0x01010000000100ff, 0x010100000101ff01, 0x01010001ffff0000, 0x01010001ff00ffff,
- 0x01010001ff010000, 0x0101000101ffffff, 0x0101000101ff01ff, 0x0101000101010101,
- 0x010101ffff01ffff, 0x010101ff00000000, 0x010101ff0001ff01, 0x010101ff0101ffff,
- 0x010101ff010101ff, 0x01010100ffffffff, 0x01010100ff000001, 0x010101000000ff00,
- 0x0101010001010000, 0x0101010100ff0001, 0x010101010001ff01, 0x010101010101ffff,
- };
- static const __device__ uint8_t ksigns_iq2xs[128] = {
- 0, 129, 130, 3, 132, 5, 6, 135, 136, 9, 10, 139, 12, 141, 142, 15,
- 144, 17, 18, 147, 20, 149, 150, 23, 24, 153, 154, 27, 156, 29, 30, 159,
- 160, 33, 34, 163, 36, 165, 166, 39, 40, 169, 170, 43, 172, 45, 46, 175,
- 48, 177, 178, 51, 180, 53, 54, 183, 184, 57, 58, 187, 60, 189, 190, 63,
- 192, 65, 66, 195, 68, 197, 198, 71, 72, 201, 202, 75, 204, 77, 78, 207,
- 80, 209, 210, 83, 212, 85, 86, 215, 216, 89, 90, 219, 92, 221, 222, 95,
- 96, 225, 226, 99, 228, 101, 102, 231, 232, 105, 106, 235, 108, 237, 238, 111,
- 240, 113, 114, 243, 116, 245, 246, 119, 120, 249, 250, 123, 252, 125, 126, 255,
- };
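- // Illustrative sketch (an observation about the table above, not code from
- // this file): each ksigns_iq2xs entry appears to be its 7-bit index with
- // bit 7 set to the index's parity, so every byte has an even number of set
- // bits. A host-side generator under that assumption:
- static inline uint8_t ksign_entry_example(uint8_t i) {
- return (uint8_t)(i | ((__builtin_popcount(i) & 1) << 7)); // even-parity byte
- }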
- static const __device__ uint64_t ksigns64[128] = {
- 0x0000000000000000, 0xff000000000000ff, 0xff0000000000ff00, 0x000000000000ffff,
- 0xff00000000ff0000, 0x0000000000ff00ff, 0x0000000000ffff00, 0xff00000000ffffff,
- 0xff000000ff000000, 0x00000000ff0000ff, 0x00000000ff00ff00, 0xff000000ff00ffff,
- 0x00000000ffff0000, 0xff000000ffff00ff, 0xff000000ffffff00, 0x00000000ffffffff,
- 0xff0000ff00000000, 0x000000ff000000ff, 0x000000ff0000ff00, 0xff0000ff0000ffff,
- 0x000000ff00ff0000, 0xff0000ff00ff00ff, 0xff0000ff00ffff00, 0x000000ff00ffffff,
- 0x000000ffff000000, 0xff0000ffff0000ff, 0xff0000ffff00ff00, 0x000000ffff00ffff,
- 0xff0000ffffff0000, 0x000000ffffff00ff, 0x000000ffffffff00, 0xff0000ffffffffff,
- 0xff00ff0000000000, 0x0000ff00000000ff, 0x0000ff000000ff00, 0xff00ff000000ffff,
- 0x0000ff0000ff0000, 0xff00ff0000ff00ff, 0xff00ff0000ffff00, 0x0000ff0000ffffff,
- 0x0000ff00ff000000, 0xff00ff00ff0000ff, 0xff00ff00ff00ff00, 0x0000ff00ff00ffff,
- 0xff00ff00ffff0000, 0x0000ff00ffff00ff, 0x0000ff00ffffff00, 0xff00ff00ffffffff,
- 0x0000ffff00000000, 0xff00ffff000000ff, 0xff00ffff0000ff00, 0x0000ffff0000ffff,
- 0xff00ffff00ff0000, 0x0000ffff00ff00ff, 0x0000ffff00ffff00, 0xff00ffff00ffffff,
- 0xff00ffffff000000, 0x0000ffffff0000ff, 0x0000ffffff00ff00, 0xff00ffffff00ffff,
- 0x0000ffffffff0000, 0xff00ffffffff00ff, 0xff00ffffffffff00, 0x0000ffffffffffff,
- 0xffff000000000000, 0x00ff0000000000ff, 0x00ff00000000ff00, 0xffff00000000ffff,
- 0x00ff000000ff0000, 0xffff000000ff00ff, 0xffff000000ffff00, 0x00ff000000ffffff,
- 0x00ff0000ff000000, 0xffff0000ff0000ff, 0xffff0000ff00ff00, 0x00ff0000ff00ffff,
- 0xffff0000ffff0000, 0x00ff0000ffff00ff, 0x00ff0000ffffff00, 0xffff0000ffffffff,
- 0x00ff00ff00000000, 0xffff00ff000000ff, 0xffff00ff0000ff00, 0x00ff00ff0000ffff,
- 0xffff00ff00ff0000, 0x00ff00ff00ff00ff, 0x00ff00ff00ffff00, 0xffff00ff00ffffff,
- 0xffff00ffff000000, 0x00ff00ffff0000ff, 0x00ff00ffff00ff00, 0xffff00ffff00ffff,
- 0x00ff00ffffff0000, 0xffff00ffffff00ff, 0xffff00ffffffff00, 0x00ff00ffffffffff,
- 0x00ffff0000000000, 0xffffff00000000ff, 0xffffff000000ff00, 0x00ffff000000ffff,
- 0xffffff0000ff0000, 0x00ffff0000ff00ff, 0x00ffff0000ffff00, 0xffffff0000ffffff,
- 0xffffff00ff000000, 0x00ffff00ff0000ff, 0x00ffff00ff00ff00, 0xffffff00ff00ffff,
- 0x00ffff00ffff0000, 0xffffff00ffff00ff, 0xffffff00ffffff00, 0x00ffff00ffffffff,
- 0xffffffff00000000, 0x00ffffff000000ff, 0x00ffffff0000ff00, 0xffffffff0000ffff,
- 0x00ffffff00ff0000, 0xffffffff00ff00ff, 0xffffffff00ffff00, 0x00ffffff00ffffff,
- 0x00ffffffff000000, 0xffffffffff0000ff, 0xffffffffff00ff00, 0x00ffffffff00ffff,
- 0xffffffffffff0000, 0x00ffffffffff00ff, 0x00ffffffffffff00, 0xffffffffffffffff,
- };
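- // Illustrative sketch (assumption, not code from this file): ksigns64[i]
- // widens each sign bit of ksigns_iq2xs[i] into a full 0x00/0xff byte, so all
- // eight signs can be applied with a single 64-bit mask. Deriving one entry:
- static inline uint64_t ksign64_entry_example(uint8_t s) {
- uint64_t r = 0;
- for (int j = 0; j < 8; ++j) {
- r |= (((s >> j) & 1) ? 0xffull : 0x00ull) << (8*j);
- }
- return r;
- }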
- static const __device__ uint8_t kmask_iq2xs[8] = {1, 2, 4, 8, 16, 32, 64, 128};
- static const __device__ int8_t kvalues_iq4nl[16] = {-127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113};
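- // Note (illustrative): kvalues_iq4nl is the 16-entry nonlinear codebook
- // shared by the IQ4 formats; a 4-bit quant q dequantizes as
- // w = d * kvalues_iq4nl[q], as in dequantize_block_iq4_nl/iq4_xs below.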
- typedef half dfloat; // dequantize float
- typedef half2 dfloat2;
- typedef void (*dequantize_kernel_t)(const void * vx, const int ib, const int iqs, dfloat2 & v);
- typedef void (*to_fp16_cuda_t)(const void * __restrict__ x, dfloat * __restrict__ y, int k, cudaStream_t stream);
- typedef float (*vec_dot_q_cuda_t)(const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs);
- typedef void (*allocate_tiles_cuda_t)(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc);
- typedef void (*load_tiles_cuda_t)(
- const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
- int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row);
- typedef float (*vec_dot_q_mul_mat_cuda_t)(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
- const int * __restrict__ y_qs, const half2 * __restrict__ y_ms, const int & i, const int & j, const int & k);
- // Utility functions
- #if defined(USE_ROCM)
- #ifndef __has_builtin
- #define __has_builtin(x) 0
- #endif
- typedef int8_t int8x4_t __attribute__((ext_vector_type(4)));
- static __device__ __forceinline__ int __vsubss4(const int a, const int b) {
- const int8x4_t va = reinterpret_cast<const int8x4_t&>(a);
- const int8x4_t vb = reinterpret_cast<const int8x4_t&>(b);
- #if __has_builtin(__builtin_elementwise_sub_sat)
- const int8x4_t c = __builtin_elementwise_sub_sat(va, vb);
- return reinterpret_cast<const int &>(c);
- #else
- int8x4_t c;
- int16_t tmp;
- #pragma unroll
- for (int i = 0; i < 4; i++) {
- tmp = va[i] - vb[i];
- if (tmp > std::numeric_limits<int8_t>::max()) tmp = std::numeric_limits<int8_t>::max();
- if (tmp < std::numeric_limits<int8_t>::min()) tmp = std::numeric_limits<int8_t>::min();
- c[i] = tmp;
- }
- return reinterpret_cast<int &>(c);
- #endif // __has_builtin(__builtin_elementwise_sub_sat)
- }
- static __device__ __forceinline__ int __dp4a(const int a, const int b, int c) {
- #if __has_builtin(__builtin_amdgcn_sdot4)
- c = __builtin_amdgcn_sdot4(a, b, c, false);
- #else
- const int8x4_t va = reinterpret_cast<const int8x4_t&>(a);
- const int8x4_t vb = reinterpret_cast<const int8x4_t&>(b);
- c += va[0] * vb[0] + va[1] * vb[1] + va[2] * vb[2] + va[3] * vb[3];
- #endif
- return c;
- }
- #endif // defined(USE_ROCM)
- static __device__ __forceinline__ int get_int_from_int8(const int8_t * x8, const int & i32) {
- const uint16_t * x16 = (const uint16_t *) (x8 + sizeof(int) * i32); // assume at least 2 byte alignment
- int x32 = 0;
- x32 |= x16[0] << 0;
- x32 |= x16[1] << 16;
- return x32;
- }
- static __device__ __forceinline__ int get_int_from_uint8(const uint8_t * x8, const int & i32) {
- const uint16_t * x16 = (const uint16_t *) (x8 + sizeof(int) * i32); // assume at least 2 byte alignment
- int x32 = 0;
- x32 |= x16[0] << 0;
- x32 |= x16[1] << 16;
- return x32;
- }
- static __device__ __forceinline__ int get_int_from_int8_aligned(const int8_t * x8, const int & i32) {
- return *((const int *) (x8 + sizeof(int) * i32)); // assume at least 4 byte alignment
- }
- static __device__ __forceinline__ int get_int_from_uint8_aligned(const uint8_t * x8, const int & i32) {
- return *((const int *) (x8 + sizeof(int) * i32)); // assume at least 4 byte alignment
- }
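- // Usage sketch (illustrative, not from the original file): packing four
- // consecutive int8 values into one int lets a 4-wide dot product collapse
- // into a single __dp4a (native on CUDA, emulated by the ROCm shim above):
- static __device__ __forceinline__ int dot4_example(const int8_t * a, const int8_t * b) {
- const int va = get_int_from_int8_aligned(a, 0);
- const int vb = get_int_from_int8_aligned(b, 0);
- return __dp4a(va, vb, 0); // a0*b0 + a1*b1 + a2*b2 + a3*b3
- }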
- // Dequant functions
- static __device__ __forceinline__ void dequantize_q4_0(const void * vx, const int ib, const int iqs, dfloat2 & v){
- const block_q4_0 * x = (const block_q4_0 *) vx;
- const dfloat d = x[ib].d;
- const int vui = x[ib].qs[iqs];
- v.x = __int2half_rn(vui & 0xF);
- v.y = __int2half_rn(vui >> 4);
- v = __hsub2(v, __floats2half2_rn(8.0f, 8.0f));
- v = __hmul2(v, {d, d});
- }
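- // Reference note (illustrative): q4_0 packs two weights per byte under one
- // shared fp16 scale d and dequantizes as w = d * (nibble - 8); v.x holds the
- // low nibble and v.y the high nibble, which land qk/2 elements apart in the
- // output row (see y_offset in dequantize_block below).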
- static __device__ __forceinline__ void dequantize_q4_1(const void * vx, const int ib, const int iqs, dfloat2 & v){
- const block_q4_1 * x = (const block_q4_1 *) vx;
- const dfloat d = __low2half(x[ib].dm);
- const dfloat m = __high2half(x[ib].dm);
- const int vui = x[ib].qs[iqs];
- v.x = __int2half_rn(vui & 0xF);
- v.y = __int2half_rn(vui >> 4);
- v = __hmul2(v, {d, d});
- v = __hadd2(v, {m, m});
- }
- static __device__ __forceinline__ void dequantize_q5_0(const void * vx, const int ib, const int iqs, dfloat2 & v){
- const block_q5_0 * x = (const block_q5_0 *) vx;
- const dfloat d = x[ib].d;
- uint32_t qh;
- memcpy(&qh, x[ib].qh, sizeof(qh));
- const int xh_0 = ((qh >> (iqs + 0)) << 4) & 0x10;
- const int xh_1 = ((qh >> (iqs + 12)) ) & 0x10;
- v.x = __int2half_rn((x[ib].qs[iqs] & 0xf) | xh_0);
- v.y = __int2half_rn((x[ib].qs[iqs] >> 4) | xh_1);
- v = __hsub2(v, __floats2half2_rn(16.0f, 16.0f));
- v = __hmul2(v, {d, d});
- }
- static __device__ __forceinline__ void dequantize_q5_1(const void * vx, const int ib, const int iqs, dfloat2 & v){
- const block_q5_1 * x = (const block_q5_1 *) vx;
- const dfloat d = __low2half(x[ib].dm);
- const dfloat m = __high2half(x[ib].dm);
- uint32_t qh;
- memcpy(&qh, x[ib].qh, sizeof(qh));
- const int xh_0 = ((qh >> (iqs + 0)) << 4) & 0x10;
- const int xh_1 = ((qh >> (iqs + 12)) ) & 0x10;
- v.x = __int2half_rn((x[ib].qs[iqs] & 0xf) | xh_0);
- v.y = __int2half_rn((x[ib].qs[iqs] >> 4) | xh_1);
- v = __hmul2(v, {d, d});
- v = __hadd2(v, {m, m});
- }
- static __device__ __forceinline__ void dequantize_q8_0(const void * vx, const int ib, const int iqs, dfloat2 & v){
- const block_q8_0 * x = (const block_q8_0 *) vx;
- const dfloat d = x[ib].d;
- v.x = __int2half_rn(x[ib].qs[iqs + 0]);
- v.y = __int2half_rn(x[ib].qs[iqs + 1]);
- v = __hmul2(v, {d, d});
- }
- template <int qk, int qr, dequantize_kernel_t dequantize_kernel, typename dst_t>
- static __global__ void dequantize_block(const void * __restrict__ vx, dst_t * __restrict__ y, const int k) {
- const int i = 2*(blockDim.x*blockIdx.x + threadIdx.x);
- if (i >= k) {
- return;
- }
- const int ib = i/qk; // block index
- const int iqs = (i%qk)/qr; // quant index
- const int iybs = i - i%qk; // y block start index
- const int y_offset = qr == 1 ? 1 : qk/2;
- // dequantize
- dfloat2 v;
- dequantize_kernel(vx, ib, iqs, v);
- y[iybs + iqs + 0] = v.x;
- y[iybs + iqs + y_offset] = v.y;
- }
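- // Indexing note (illustrative): each thread writes two outputs. For qr == 2
- // formats a byte stores two quants, so low nibbles fill the first half of a
- // qk-wide output block and high nibbles the second half (y_offset = qk/2);
- // for qr == 1 (q8_0) the two values are simply adjacent.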
- template<typename dst_t>
- static __global__ void dequantize_block_q2_K(const void * __restrict__ vx, dst_t * __restrict__ yy) {
- const int i = blockIdx.x;
- const block_q2_K * x = (const block_q2_K *) vx;
- const int tid = threadIdx.x;
- const int n = tid/32;
- const int l = tid - 32*n;
- const int is = 8*n + l/16;
- const uint8_t q = x[i].qs[32*n + l];
- dst_t * y = yy + i*QK_K + 128*n;
- half dall = __low2half(x[i].dm);
- half dmin = __high2half(x[i].dm);
- y[l+ 0] = __hsub(__hmul(dall, __int2half_rn((x[i].scales[is+0] & 0xF) * ((q >> 0) & 3))), __hmul(dmin, __int2half_rn(x[i].scales[is+0] >> 4)));
- y[l+32] = __hsub(__hmul(dall, __int2half_rn((x[i].scales[is+2] & 0xF) * ((q >> 2) & 3))), __hmul(dmin, __int2half_rn(x[i].scales[is+2] >> 4)));
- y[l+64] = __hsub(__hmul(dall, __int2half_rn((x[i].scales[is+4] & 0xF) * ((q >> 4) & 3))), __hmul(dmin, __int2half_rn(x[i].scales[is+4] >> 4)));
- y[l+96] = __hsub(__hmul(dall, __int2half_rn((x[i].scales[is+6] & 0xF) * ((q >> 6) & 3))), __hmul(dmin, __int2half_rn(x[i].scales[is+6] >> 4)));
- }
- template<typename dst_t>
- static __global__ void dequantize_block_q3_K(const void * __restrict__ vx, dst_t * __restrict__ yy) {
- const int i = blockIdx.x;
- const block_q3_K * x = (const block_q3_K *) vx;
- const int r = threadIdx.x/4;
- const int tid = r/2;
- const int is0 = r%2;
- const int l0 = 16*is0 + 4*(threadIdx.x%4);
- const int n = tid / 4;
- const int j = tid - 4*n;
- uint8_t m = 1 << (4*n + j);
- int is = 8*n + 2*j + is0;
- int shift = 2*j;
- int8_t us = is < 4 ? (x[i].scales[is-0] & 0xF) | (((x[i].scales[is+8] >> 0) & 3) << 4) :
- is < 8 ? (x[i].scales[is-0] & 0xF) | (((x[i].scales[is+4] >> 2) & 3) << 4) :
- is < 12 ? (x[i].scales[is-8] >> 4) | (((x[i].scales[is+0] >> 4) & 3) << 4) :
- (x[i].scales[is-8] >> 4) | (((x[i].scales[is-4] >> 6) & 3) << 4);
- half d_all = x[i].d;
- half dl = __hmul(d_all, __int2half_rn(us - 32));
- dst_t * y = yy + i*QK_K + 128*n + 32*j;
- const uint8_t * q = x[i].qs + 32*n;
- const uint8_t * hm = x[i].hmask;
- for (int l = l0; l < l0+4; ++l) y[l] = __hmul(dl, __int2half_rn((int8_t)((q[l] >> shift) & 3) - ((hm[l] & m) ? 0 : 4)));
- }
- static inline __device__ void get_scale_min_k4(int j, const uint8_t * q, uint8_t & d, uint8_t & m) {
- if (j < 4) {
- d = q[j] & 63; m = q[j + 4] & 63;
- } else {
- d = (q[j+4] & 0xF) | ((q[j-4] >> 6) << 4);
- m = (q[j+4] >> 4) | ((q[j-0] >> 6) << 4);
- }
- }
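- // Layout note (illustrative): q4_K/q5_K pack eight 6-bit scales and eight
- // 6-bit mins into 12 bytes. For j < 4 both sit in the low 6 bits of bytes
- // 0..7; for j >= 4 the low 4 bits come from bytes 8..11 and the top 2 bits
- // are borrowed from the high bits of bytes 0..7.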
- template<typename dst_t>
- static __global__ void dequantize_block_q4_K(const void * __restrict__ vx, dst_t * __restrict__ yy) {
- const block_q4_K * x = (const block_q4_K *) vx;
- const int i = blockIdx.x;
- // assume 32 threads
- const int tid = threadIdx.x;
- const int il = tid/8;
- const int ir = tid%8;
- const int is = 2*il;
- const int n = 4;
- dst_t * y = yy + i*QK_K + 64*il + n*ir;
- const half dall = __low2half(x[i].dm);
- const half dmin = __high2half(x[i].dm);
- const uint8_t * q = x[i].qs + 32*il + n*ir;
- uint8_t sc, m;
- get_scale_min_k4(is + 0, x[i].scales, sc, m);
- const half d1 = __hmul(dall, __int2half_rn(sc));
- const half m1 = __hmul(dmin, __int2half_rn(m));
- get_scale_min_k4(is + 1, x[i].scales, sc, m);
- const half d2 = __hmul(dall, __int2half_rn(sc));
- const half m2 = __hmul(dmin, __int2half_rn(m));
- for (int l = 0; l < n; ++l) {
- y[l + 0] = __hsub(__hmul(d1, __int2half_rn(q[l] & 0xF)), m1);
- y[l +32] = __hsub(__hmul(d2, __int2half_rn(q[l] >> 4)), m2);
- }
- }
- template<typename dst_t>
- static __global__ void dequantize_block_q5_K(const void * __restrict__ vx, dst_t * __restrict__ yy) {
- const block_q5_K * x = (const block_q5_K *) vx;
- const int i = blockIdx.x;
- // assume 64 threads
- const int tid = threadIdx.x;
- const int il = tid/16; // il is in 0...3
- const int ir = tid%16; // ir is in 0...15
- const int is = 2*il; // is is in 0...6
- dst_t * y = yy + i*QK_K + 64*il + 2*ir;
- const half dall = __low2half(x[i].dm);
- const half dmin = __high2half(x[i].dm);
- const uint8_t * ql = x[i].qs + 32*il + 2*ir;
- const uint8_t * qh = x[i].qh + 2*ir;
- uint8_t sc, m;
- get_scale_min_k4(is + 0, x[i].scales, sc, m);
- const half d1 = __hmul(dall, __int2half_rn(sc)); const half m1 = __hmul(dmin, __int2half_rn(m));
- get_scale_min_k4(is + 1, x[i].scales, sc, m);
- const half d2 = __hmul(dall, __int2half_rn(sc)); const half m2 = __hmul(dmin, __int2half_rn(m));
- uint8_t hm = 1 << (2*il);
- y[ 0] = __hsub(__hmul(d1, __int2half_rn((ql[0] & 0xF) + (qh[0] & hm ? 16 : 0))), m1);
- y[ 1] = __hsub(__hmul(d1, __int2half_rn((ql[1] & 0xF) + (qh[1] & hm ? 16 : 0))), m1);
- hm <<= 1;
- y[32] = __hsub(__hmul(d2, __int2half_rn((ql[0] >> 4) + (qh[0] & hm ? 16 : 0))), m2);
- y[33] = __hsub(__hmul(d2, __int2half_rn((ql[1] >> 4) + (qh[1] & hm ? 16 : 0))), m2);
- }
- template<typename dst_t>
- static __global__ void dequantize_block_q6_K(const void * __restrict__ vx, dst_t * __restrict__ yy) {
- const block_q6_K * x = (const block_q6_K *) vx;
- const int i = blockIdx.x;
- // assume 64 threads
- const int tid = threadIdx.x;
- const int ip = tid/32; // ip is 0 or 1
- const int il = tid - 32*ip; // 0...31
- const int is = 8*ip + il/16;
- dst_t * y = yy + i*QK_K + 128*ip + il;
- const half d = x[i].d;
- const uint8_t * ql = x[i].ql + 64*ip + il;
- const uint8_t qh = x[i].qh[32*ip + il];
- const int8_t * sc = x[i].scales + is;
- y[ 0] = __hmul(d, __int2half_rn(sc[0] * ((int8_t)((ql[ 0] & 0xF) | (((qh >> 0) & 3) << 4)) - 32)));
- y[32] = __hmul(d, __int2half_rn(sc[2] * ((int8_t)((ql[32] & 0xF) | (((qh >> 2) & 3) << 4)) - 32)));
- y[64] = __hmul(d, __int2half_rn(sc[4] * ((int8_t)((ql[ 0] >> 4) | (((qh >> 4) & 3) << 4)) - 32)));
- y[96] = __hmul(d, __int2half_rn(sc[6] * ((int8_t)((ql[32] >> 4) | (((qh >> 6) & 3) << 4)) - 32)));
- }
- template<typename dst_t>
- static __global__ void dequantize_block_iq2_xxs(const void * __restrict__ vx, dst_t * __restrict__ yy) {
- const int i = blockIdx.x;
- const block_iq2_xxs * x = (const block_iq2_xxs *) vx;
- const int tid = threadIdx.x;
- const int il = tid/8; // 0...3
- const int ib = tid%8; // 0...7
- dst_t * y = yy + i*QK_K + 32*ib + 8*il;
- const uint16_t * q2 = x[i].qs + 4*ib;
- const uint8_t * aux8 = (const uint8_t *)q2;
- const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[il]);
- const uint32_t aux32 = q2[2] | (q2[3] << 16);
- const float d = __half2float(x[i].d) * (0.5f + (aux32 >> 28)) * 0.25f;
- const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*il) & 127];
- for (int j = 0; j < 8; ++j) y[j] = __float2half(d * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f));
- }
- template<typename dst_t>
- static __global__ void dequantize_block_iq2_xs(const void * __restrict__ vx, dst_t * __restrict__ yy) {
- const int i = blockIdx.x;
- const block_iq2_xs * x = (const block_iq2_xs *) vx;
- const int tid = threadIdx.x;
- const int il = tid/8; // 0...3
- const int ib = tid%8; // 0...7
- dst_t * y = yy + i*QK_K + 32*ib + 8*il;
- const uint16_t * q2 = x[i].qs + 4*ib;
- const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[il] & 511));
- const float d = __half2float(x[i].d) * (0.5f + ((x[i].scales[ib] >> 4*(il/2)) & 0xf)) * 0.25f;
- const uint8_t signs = ksigns_iq2xs[q2[il] >> 9];
- for (int j = 0; j < 8; ++j) y[j] = __float2half(d * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f));
- }
- template<typename dst_t>
- static __global__ void dequantize_block_iq2_s(const void * __restrict__ vx, dst_t * __restrict__ yy) {
- const int i = blockIdx.x;
- const block_iq2_s * x = (const block_iq2_s *) vx;
- const int tid = threadIdx.x;
- const int il = tid/8; // 0...3
- const int ib = tid%8; // 0...7
- dst_t * y = yy + i*QK_K + 32*ib + 8*il;
- const uint8_t * grid = (const uint8_t *)(iq2s_grid + (x[i].qs[4*ib+il] | ((x[i].qh[ib] << (8-2*il)) & 0x300)));
- const float d = __half2float(x[i].d) * (0.5f + ((x[i].scales[ib] >> 4*(il/2)) & 0xf)) * 0.25f;
- const uint8_t signs = x[i].qs[QK_K/8+4*ib+il];
- for (int j = 0; j < 8; ++j) y[j] = __float2half(d * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f));
- }
- template<typename dst_t>
- static __global__ void dequantize_block_iq3_xxs(const void * __restrict__ vx, dst_t * __restrict__ yy) {
- const int i = blockIdx.x;
- const block_iq3_xxs * x = (const block_iq3_xxs *) vx;
- const int tid = threadIdx.x;
- const int il = tid/8; // 0...3
- const int ib = tid%8; // 0...7
- dst_t * y = yy + i*QK_K + 32*ib + 8*il;
- const uint8_t * q3 = x[i].qs + 8*ib;
- const uint16_t * gas = (const uint16_t *)(x[i].qs + QK_K/4) + 2*ib;
- const uint8_t * grid1 = (const uint8_t *)(iq3xxs_grid + q3[2*il+0]);
- const uint8_t * grid2 = (const uint8_t *)(iq3xxs_grid + q3[2*il+1]);
- const uint32_t aux32 = gas[0] | (gas[1] << 16);
- const float d = __half2float(x[i].d) * (0.5f + (aux32 >> 28)) * 0.5f;
- const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*il) & 127];
- for (int j = 0; j < 4; ++j) {
- y[j+0] = __float2half(d * grid1[j] * (signs & kmask_iq2xs[j+0] ? -1.f : 1.f));
- y[j+4] = __float2half(d * grid2[j] * (signs & kmask_iq2xs[j+4] ? -1.f : 1.f));
- }
- }
- template<typename dst_t>
- static __global__ void dequantize_block_iq3_s(const void * __restrict__ vx, dst_t * __restrict__ yy) {
- const int i = blockIdx.x;
- const block_iq3_s * x = (const block_iq3_s *) vx;
- const int tid = threadIdx.x;
- const int il = tid/8; // 0...3
- const int ib = tid%8; // 0...7
- dst_t * y = yy + i*QK_K + 32*ib + 8*il;
- const uint8_t * qs = x[i].qs + 8*ib;
- const uint8_t * grid1 = (const uint8_t *)(iq3xs_grid + (qs[2*il+0] | ((x[i].qh[ib] << (8-2*il)) & 256)));
- const uint8_t * grid2 = (const uint8_t *)(iq3xs_grid + (qs[2*il+1] | ((x[i].qh[ib] << (7-2*il)) & 256)));
- const float d = __half2float(x[i].d) * (0.5f + ((x[i].scales[ib/2] >> 4*(ib%2)) & 0xf)) * 0.5f;
- const uint8_t signs = x[i].signs[4*ib + il];
- for (int j = 0; j < 4; ++j) {
- y[j+0] = __float2half(d * grid1[j] * (signs & kmask_iq2xs[j+0] ? -1.f : 1.f));
- y[j+4] = __float2half(d * grid2[j] * (signs & kmask_iq2xs[j+4] ? -1.f : 1.f));
- }
- }
- template<typename dst_t>
- static __global__ void dequantize_block_iq1_s(const void * __restrict__ vx, dst_t * __restrict__ yy) {
- const int i = blockIdx.x;
- const block_iq1_s * x = (const block_iq1_s *) vx;
- const int tid = threadIdx.x;
- const int il = tid/8; // 0...3
- const int ib = tid%8; // 0...7
- dst_t * y = yy + i*QK_K + 32*ib + 8*il;
- const int i8 = 4*ib+il;
- uint8_t h = x[i].scales[i8/2] >> 4*(i8%2);
- const int8_t * grid = (const int8_t *)(iq1s_grid + (x[i].qs[i8] | ((h & 8) << 5)));
- const float d = __half2float(x[i].d) * (2*(h & 7) + 1);
- for (int j = 0; j < 8; ++j) y[j] = __float2half(d * grid[j]);
- }
- template<typename dst_t>
- static __global__ void dequantize_block_iq4_nl(const void * __restrict__ vx, dst_t * __restrict__ yy) {
- const int i = blockIdx.x;
- const block_iq4_nl * x = (const block_iq4_nl *) vx + i*(QK_K/QK4_NL);
- const int tid = threadIdx.x;
- const int il = tid/8; // 0...3
- const int ib = tid%8; // 0...7
- dst_t * y = yy + i*QK_K + 32*ib + 4*il;
- const uint8_t * q4 = x[ib].qs + 4*il;
- const float d = __half2float(x[ib].d);
- for (int j = 0; j < 4; ++j) {
- y[j+ 0] = __float2half(d * kvalues_iq4nl[q4[j] & 0xf]);
- y[j+16] = __float2half(d * kvalues_iq4nl[q4[j] >> 4]);
- }
- }
- template<typename dst_t>
- static __global__ void dequantize_block_iq4_xs(const void * __restrict__ vx, dst_t * __restrict__ yy) {
- const int i = blockIdx.x;
- const block_iq4_xs * x = (const block_iq4_xs *)vx;
- const int tid = threadIdx.x;
- const int il = tid/8; // 0...3
- const int ib = tid%8; // 0...7
- dst_t * y = yy + i*QK_K + 32*ib + 4*il;
- const uint8_t * q4 = x[i].qs + 16*ib + 4*il;
- const float d = __half2float(x[i].d) * ((((x[i].scales_l[ib/2] >> 4*(ib%2)) & 0xf) | (((x[i].scales_h >> 2*ib) & 3) << 4)) - 32);
- for (int j = 0; j < 4; ++j) {
- y[j+ 0] = __float2half(d * kvalues_iq4nl[q4[j] & 0xf]);
- y[j+16] = __float2half(d * kvalues_iq4nl[q4[j] >> 4]);
- }
- }
- template <int qk, int qr, dequantize_kernel_t dequantize_kernel, typename dst_t>
- static void dequantize_block_cuda(const void * __restrict__ vx, dst_t * __restrict__ y, const int k, cudaStream_t stream) {
- const int num_blocks = (k + 2*CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / (2*CUDA_DEQUANTIZE_BLOCK_SIZE);
- dequantize_block<qk, qr, dequantize_kernel><<<num_blocks, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k);
- }
- template<typename dst_t>
- static void dequantize_row_q2_K_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) {
- const int nb = k / QK_K;
- dequantize_block_q2_K<<<nb, 64, 0, stream>>>(vx, y);
- }
- template<typename dst_t>
- static void dequantize_row_q3_K_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) {
- const int nb = k / QK_K;
- dequantize_block_q3_K<<<nb, 64, 0, stream>>>(vx, y);
- }
- template<typename dst_t>
- static void dequantize_row_q4_K_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) {
- const int nb = k / QK_K;
- dequantize_block_q4_K<<<nb, 32, 0, stream>>>(vx, y);
- }
- template<typename dst_t>
- static void dequantize_row_q5_K_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) {
- const int nb = k / QK_K;
- dequantize_block_q5_K<<<nb, 64, 0, stream>>>(vx, y);
- }
- template<typename dst_t>
- static void dequantize_row_q6_K_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) {
- const int nb = k / QK_K;
- dequantize_block_q6_K<<<nb, 64, 0, stream>>>(vx, y);
- }
- template<typename dst_t>
- static void dequantize_row_iq2_xxs_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) {
- const int nb = k / QK_K;
- dequantize_block_iq2_xxs<<<nb, 32, 0, stream>>>(vx, y);
- }
- template<typename dst_t>
- static void dequantize_row_iq2_xs_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) {
- const int nb = k / QK_K;
- dequantize_block_iq2_xs<<<nb, 32, 0, stream>>>(vx, y);
- }
- template<typename dst_t>
- static void dequantize_row_iq2_s_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) {
- const int nb = k / QK_K;
- dequantize_block_iq2_s<<<nb, 32, 0, stream>>>(vx, y);
- }
- template<typename dst_t>
- static void dequantize_row_iq3_xxs_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) {
- const int nb = k / QK_K;
- dequantize_block_iq3_xxs<<<nb, 32, 0, stream>>>(vx, y);
- }
- template<typename dst_t>
- static void dequantize_row_iq3_s_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) {
- const int nb = k / QK_K;
- dequantize_block_iq3_s<<<nb, 32, 0, stream>>>(vx, y);
- }
- template<typename dst_t>
- static void dequantize_row_iq1_s_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) {
- const int nb = k / QK_K;
- dequantize_block_iq1_s<<<nb, 32, 0, stream>>>(vx, y);
- }
- template<typename dst_t>
- static void dequantize_row_iq4_nl_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) {
- const int nb = (k + QK_K - 1) / QK_K;
- dequantize_block_iq4_nl<<<nb, 32, 0, stream>>>(vx, y);
- }
- template<typename dst_t>
- static void dequantize_row_iq4_xs_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) {
- const int nb = (k + QK_K - 1) / QK_K;
- dequantize_block_iq4_xs<<<nb, 32, 0, stream>>>(vx, y);
- }
- static to_fp16_cuda_t ggml_get_to_fp16_cuda(int type) {
- // The integer codes follow the ggml tensor-type enum values.
- switch (type) {
- case 2: // GGML_TYPE_Q4_0
- return dequantize_block_cuda<QK4_0, QR4_0, dequantize_q4_0>;
- case 3: // GGML_TYPE_Q4_1
- return dequantize_block_cuda<QK4_1, QR4_1, dequantize_q4_1>;
- case 6: // GGML_TYPE_Q5_0
- return dequantize_block_cuda<QK5_0, QR5_0, dequantize_q5_0>;
- case 7: // GGML_TYPE_Q5_1
- return dequantize_block_cuda<QK5_1, QR5_1, dequantize_q5_1>;
- case 8: // GGML_TYPE_Q8_0
- return dequantize_block_cuda<QK8_0, QR8_0, dequantize_q8_0>;
- case 10: // GGML_TYPE_Q2_K
- return dequantize_row_q2_K_cuda;
- case 11: // GGML_TYPE_Q3_K
- return dequantize_row_q3_K_cuda;
- case 12: // GGML_TYPE_Q4_K
- return dequantize_row_q4_K_cuda;
- case 13: // GGML_TYPE_Q5_K
- return dequantize_row_q5_K_cuda;
- case 14: // GGML_TYPE_Q6_K
- return dequantize_row_q6_K_cuda;
- case 16: // GGML_TYPE_IQ2_XXS
- return dequantize_row_iq2_xxs_cuda;
- case 17: // GGML_TYPE_IQ2_XS
- return dequantize_row_iq2_xs_cuda;
- case 18: // GGML_TYPE_IQ3_XXS
- return dequantize_row_iq3_xxs_cuda;
- case 19: // GGML_TYPE_IQ1_S
- return dequantize_row_iq1_s_cuda;
- case 20: // GGML_TYPE_IQ4_NL
- return dequantize_row_iq4_nl_cuda;
- case 21: // GGML_TYPE_IQ3_S
- return dequantize_row_iq3_s_cuda;
- case 22: // GGML_TYPE_IQ2_S
- return dequantize_row_iq2_s_cuda;
- case 23: // GGML_TYPE_IQ4_XS
- return dequantize_row_iq4_xs_cuda;
- default:
- return nullptr;
- }
- }
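- // Usage sketch (illustrative; the buffer and stream names are assumptions):
- // converting k q4_0-quantized values to fp16:
- //   const to_fp16_cuda_t to_fp16 = ggml_get_to_fp16_cuda(2); // GGML_TYPE_Q4_0
- //   if (to_fp16 != nullptr) {
- //       to_fp16(d_quant, d_half, k, stream);
- //   }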
- // GEMV
- template <int qk, int qr, dequantize_kernel_t dequantize_kernel>
- static __global__ void dequantize_mul_mat_vec(const void * __restrict__ vx, const dfloat * __restrict__ y, dfloat * __restrict__ dst, const int ncols, const int nrows) {
- // qk = quantized weights per x block
- // qr = number of quantized weights per data value in x block
- const int row = blockIdx.x*blockDim.y + threadIdx.y;
- if (row >= nrows) {
- return;
- }
- const int tid = threadIdx.x;
- const int iter_stride = 2*GGML_CUDA_DMMV_X;
- const int vals_per_iter = iter_stride / WARP_SIZE; // number of quantized values per thread per outer-loop iteration
- const int y_offset = qr == 1 ? 1 : qk/2;
- half2 tmp = __floats2half2_rn(0.0f, 0.0f); // two sums for f16 to take advantage of half2 intrinsics
- for (int i = 0; i < ncols; i += iter_stride) {
- const int col = i + vals_per_iter*tid;
- const int ib = (row*ncols + col)/qk; // x block index
- const int iqs = (col%qk)/qr; // x quant index
- const int iybs = col - col%qk; // y block start index
- // processing >2 values per i iter is faster for fast GPUs
- #pragma unroll
- for (int j = 0; j < vals_per_iter; j += 2) {
- // process 2 vals per j iter
- // dequantize
- // for qr = 2 the iqs needs to increase by 1 per j iter because 2 weights per data val
- dfloat2 v;
- dequantize_kernel(vx, ib, iqs + j/qr, v);
- // matrix multiplication
- // for qr = 2 the y index needs to increase by 1 per j iter because of y_offset = qk/2
- tmp = __hadd2(tmp, __hmul2(v, {
- y[iybs + iqs + j/qr + 0],
- y[iybs + iqs + j/qr + y_offset]
- }));
- }
- }
- // sum up partial sums and write back result
- #pragma unroll
- for (int mask = 16; mask > 0; mask >>= 1) {
- tmp = __hadd2(tmp, __shfl_xor_sync(0xffffffff, tmp, mask, 32));
- }
- if (tid == 0) {
- dst[row] = __hadd(tmp.x, tmp.y);
- }
- }
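- // Reduction note (illustrative): the __shfl_xor_sync ladder above is a
- // butterfly reduction; after log2(32) = 5 steps every lane in the warp holds
- // the full row sum, so lane 0 can write dst[row] with no shared memory. The
- // k-quant GEMV kernels below use the same pattern with a float accumulator.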
- static __global__ void dequantize_mul_mat_vec_q2_k(const void * __restrict__ vx, const dfloat * __restrict__ yy, dfloat * __restrict__ dst, const int ncols, int nrows) {
- static_assert(16%K_QUANTS_PER_ITERATION == 0, "16 must be divisible by K_QUANTS_PER_ITERATION");
- const int row = blockIdx.x*blockDim.y + threadIdx.y;
- if (row >= nrows) return;
- const int num_blocks_per_row = ncols / QK_K;
- const int ib0 = row*num_blocks_per_row;
- const block_q2_K * x = (const block_q2_K *)vx + ib0;
- float tmp = 0; // partial sum for thread in warp
- const int tid = threadIdx.x/K_QUANTS_PER_ITERATION; // 0...31 or 0...15
- const int ix = threadIdx.x%K_QUANTS_PER_ITERATION; // 0 or 0,1
- const int step = 16/K_QUANTS_PER_ITERATION;
- const int im = tid/step; // 0 or 1. 0 computes 0..., 1 computes 128...
- const int in = tid - step*im; // 0...15 or 0...7
- const int l0 = K_QUANTS_PER_ITERATION*in; // 0...15 or 0...14 in steps of 2
- const int q_offset = 32*im + l0;
- const int s_offset = 8*im;
- const int y_offset = 128*im + l0;
- uint32_t aux[4];
- const uint8_t * d = (const uint8_t *)aux;
- const uint8_t * m = (const uint8_t *)(aux + 2);
- for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {
- const half * y = yy + i * QK_K + y_offset;
- const uint8_t * q = x[i].qs + q_offset;
- const float dall = __low2float(x[i].dm);
- const float dmin = __high2float(x[i].dm);
- const uint32_t * a = (const uint32_t *)(x[i].scales + s_offset);
- aux[0] = a[0] & 0x0f0f0f0f;
- aux[1] = a[1] & 0x0f0f0f0f;
- aux[2] = (a[0] >> 4) & 0x0f0f0f0f;
- aux[3] = (a[1] >> 4) & 0x0f0f0f0f;
- float sum1 = 0, sum2 = 0;
- for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) {
- sum1 += __half2float(y[l+ 0]) * d[0] * ((q[l+ 0] >> 0) & 3)
- + __half2float(y[l+32]) * d[2] * ((q[l+ 0] >> 2) & 3)
- + __half2float(y[l+64]) * d[4] * ((q[l+ 0] >> 4) & 3)
- + __half2float(y[l+96]) * d[6] * ((q[l+ 0] >> 6) & 3)
- + __half2float(y[l+16]) * d[1] * ((q[l+16] >> 0) & 3)
- + __half2float(y[l+48]) * d[3] * ((q[l+16] >> 2) & 3)
- + __half2float(y[l+80]) * d[5] * ((q[l+16] >> 4) & 3)
- + __half2float(y[l+112]) * d[7] * ((q[l+16] >> 6) & 3);
- sum2 += __half2float(y[l+ 0]) * m[0] + __half2float(y[l+32]) * m[2] + __half2float(y[l+64]) * m[4] + __half2float(y[l+96]) * m[6]
- + __half2float(y[l+16]) * m[1] + __half2float(y[l+48]) * m[3] + __half2float(y[l+80]) * m[5] + __half2float(y[l+112]) * m[7];
- }
- tmp += dall * sum1 - dmin * sum2;
- }
- // sum up partial sums and write back result
- #pragma unroll
- for (int mask = 16; mask > 0; mask >>= 1) {
- tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
- }
- if (threadIdx.x == 0) {
- dst[row] = __float2half(tmp);
- }
- }
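- // Math note (illustrative): per 16-value group, q2_K dequantizes as
- // w = dall * (scale & 0xF) * q - dmin * (scale >> 4); sum1 accumulates the
- // quant terms and sum2 the min terms so the two block scales are applied
- // once per block rather than once per weight.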
- static __global__ void dequantize_mul_mat_vec_q3_k(const void * __restrict__ vx, const dfloat * __restrict__ yy, dfloat * __restrict__ dst, const int ncols, int nrows) {
- const int row = blockIdx.x*blockDim.y + threadIdx.y;
- if (row >= nrows) return;
- const int num_blocks_per_row = ncols / QK_K;
- const int ib0 = row*num_blocks_per_row;
- const block_q3_K * x = (const block_q3_K *)vx + ib0;
- float tmp = 0; // partial sum for thread in warp
- const uint16_t kmask1 = 0x0303;
- const uint16_t kmask2 = 0x0f0f;
- const int tid = threadIdx.x/K_QUANTS_PER_ITERATION; // 0...31 or 0...15
- const int ix = threadIdx.x%K_QUANTS_PER_ITERATION; // 0 or 0,1
- const int n = K_QUANTS_PER_ITERATION; // iterations in the inner loop
- const int step = 16/K_QUANTS_PER_ITERATION;
- const int im = tid/step; // 0 or 1. 0 computes 0..., 1 computes 128...
- const int in = tid - step*im; // 0...15 or 0...7
- const uint8_t m = 1 << (4*im);
- const int l0 = n*in; // 0...15 or 0...14 in steps of 2
- const int q_offset = 32*im + l0;
- const int y_offset = 128*im + l0;
- uint16_t utmp[4];
- const int8_t * s = (const int8_t *)utmp;
- const uint16_t s_shift = 4*im;
- for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {
- const half * y = yy + i * QK_K + y_offset;
- const uint8_t * q = x[i].qs + q_offset;
- const uint8_t * h = x[i].hmask + l0;
- const uint16_t * a = (const uint16_t *)x[i].scales;
- utmp[0] = ((a[0] >> s_shift) & kmask2) | (((a[4] >> (s_shift + 0)) & kmask1) << 4);
- utmp[1] = ((a[1] >> s_shift) & kmask2) | (((a[5] >> (s_shift + 0)) & kmask1) << 4);
- utmp[2] = ((a[2] >> s_shift) & kmask2) | (((a[4] >> (s_shift + 2)) & kmask1) << 4);
- utmp[3] = ((a[3] >> s_shift) & kmask2) | (((a[5] >> (s_shift + 2)) & kmask1) << 4);
- const float d = __half2float(x[i].d);
- float sum = 0;
- for (int l = 0; l < n; ++l) {
- sum += __half2float(y[l+ 0]) * (s[0] - 32) * (((q[l] >> 0) & 3) - (h[l] & (m << 0) ? 0 : 4))
- + __half2float(y[l+32]) * (s[2] - 32) * (((q[l] >> 2) & 3) - (h[l] & (m << 1) ? 0 : 4))
- + __half2float(y[l+64]) * (s[4] - 32) * (((q[l] >> 4) & 3) - (h[l] & (m << 2) ? 0 : 4))
- + __half2float(y[l+96]) * (s[6] - 32) * (((q[l] >> 6) & 3) - (h[l] & (m << 3) ? 0 : 4));
- sum += __half2float(y[l+16]) * (s[1] - 32) * (((q[l+16] >> 0) & 3) - (h[l+16] & (m << 0) ? 0 : 4))
- + __half2float(y[l+48]) * (s[3] - 32) * (((q[l+16] >> 2) & 3) - (h[l+16] & (m << 1) ? 0 : 4))
- + __half2float(y[l+80]) * (s[5] - 32) * (((q[l+16] >> 4) & 3) - (h[l+16] & (m << 2) ? 0 : 4))
- + __half2float(y[l+112]) * (s[7] - 32) * (((q[l+16] >> 6) & 3) - (h[l+16] & (m << 3) ? 0 : 4));
- }
- tmp += d * sum;
- }
- // sum up partial sums and write back result
- #pragma unroll
- for (int mask = 16; mask > 0; mask >>= 1) {
- tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
- }
- if (threadIdx.x == 0) {
- dst[row] = __float2half(tmp);
- }
- }
- static __global__ void dequantize_mul_mat_vec_q4_k(const void * __restrict__ vx, const dfloat * __restrict__ yy, dfloat * __restrict__ dst, const int ncols, int nrows) {
- const int row = blockIdx.x*blockDim.y + threadIdx.y;
- if (row >= nrows) return;
- const int num_blocks_per_row = ncols / QK_K;
- const int ib0 = row*num_blocks_per_row;
- const block_q4_K * x = (const block_q4_K *)vx + ib0;
- const uint16_t kmask1 = 0x3f3f;
- const uint16_t kmask2 = 0x0f0f;
- const uint16_t kmask3 = 0xc0c0;
- const int tid = threadIdx.x/K_QUANTS_PER_ITERATION; // 0...31 or 0...15
- const int ix = threadIdx.x%K_QUANTS_PER_ITERATION; // 0 or 0,1
- const int step = 8/K_QUANTS_PER_ITERATION; // 8 or 4
- const int il = tid/step; // 0...3
- const int ir = tid - step*il; // 0...7 or 0...3
- const int n = 2 * K_QUANTS_PER_ITERATION; // 2 or 4
- const int im = il/2; // 0 or 1. 0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224
- const int in = il%2;
- const int l0 = n*(2*ir + in);
- const int q_offset = 32*im + l0;
- const int y_offset = 64*im + l0;
- uint16_t aux[4];
- const uint8_t * sc = (const uint8_t *)aux;
- #if K_QUANTS_PER_ITERATION == 2
- uint32_t q32[4];
- const uint8_t * q4 = (const uint8_t *)q32;
- #else
- uint16_t q16[4];
- const uint8_t * q4 = (const uint8_t *)q16;
- #endif
- float tmp = 0; // partial sum for thread in warp
- for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {
- const half * y1 = yy + i*QK_K + y_offset;
- const half * y2 = y1 + 128;
- const float dall = __low2float(x[i].dm);
- const float dmin = __high2float(x[i].dm);
- const uint16_t * a = (const uint16_t *)x[i].scales;
- aux[0] = a[im+0] & kmask1;
- aux[1] = a[im+2] & kmask1;
- aux[2] = ((a[im+4] >> 0) & kmask2) | ((a[im+0] & kmask3) >> 2);
- aux[3] = ((a[im+4] >> 4) & kmask2) | ((a[im+2] & kmask3) >> 2);
- #if K_QUANTS_PER_ITERATION == 2
- const uint32_t * q1 = (const uint32_t *)(x[i].qs + q_offset);
- const uint32_t * q2 = q1 + 16;
- q32[0] = q1[0] & 0x0f0f0f0f;
- q32[1] = q1[0] & 0xf0f0f0f0;
- q32[2] = q2[0] & 0x0f0f0f0f;
- q32[3] = q2[0] & 0xf0f0f0f0;
- float4 s = {0.f, 0.f, 0.f, 0.f};
- float smin = 0;
- for (int l = 0; l < 4; ++l) {
- s.x += __half2float(y1[l]) * q4[l+0]; s.y += __half2float(y1[l+32]) * q4[l+ 4];
- s.z += __half2float(y2[l]) * q4[l+8]; s.w += __half2float(y2[l+32]) * q4[l+12];
- smin += __half2float(y1[l]) * sc[2] + __half2float(y1[l+32]) * sc[3] + __half2float(y2[l]) * sc[6] + __half2float(y2[l+32]) * sc[7];
- }
- tmp += dall * (s.x * sc[0] + s.y * sc[1] * 1.f/16.f + s.z * sc[4] + s.w * sc[5] * 1.f/16.f) - dmin * smin;
- #else
- const uint16_t * q1 = (const uint16_t *)(x[i].qs + q_offset);
- const uint16_t * q2 = q1 + 32;
- q16[0] = q1[0] & 0x0f0f;
- q16[1] = q1[0] & 0xf0f0;
- q16[2] = q2[0] & 0x0f0f;
- q16[3] = q2[0] & 0xf0f0;
- float4 s = {0.f, 0.f, 0.f, 0.f};
- float smin = 0;
- for (int l = 0; l < 2; ++l) {
- s.x += __half2float(y1[l]) * q4[l+0]; s.y += __half2float(y1[l+32]) * q4[l+2];
- s.z += __half2float(y2[l]) * q4[l+4]; s.w += __half2float(y2[l+32]) * q4[l+6];
- smin += __half2float(y1[l]) * sc[2] + __half2float(y1[l+32]) * sc[3] + __half2float(y2[l]) * sc[6] + __half2float(y2[l+32]) * sc[7];
- }
- tmp += dall * (s.x * sc[0] + s.y * sc[1] * 1.f/16.f + s.z * sc[4] + s.w * sc[5] * 1.f/16.f) - dmin * smin;
- #endif
- }
- // sum up partial sums and write back result
- #pragma unroll
- for (int mask = 16; mask > 0; mask >>= 1) {
- tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
- }
- if (tid == 0) {
- dst[row] = __float2half(tmp);
- }
- }
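- // Q5_K mat-vec kernel: same layout as Q4_K plus qh[32], which supplies the
- // fifth quant bit (adding 16 when set). Launched with one 32-thread block
- // per row, hence no nrows bound check.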
- static __global__ void dequantize_mul_mat_vec_q5_k(const void * __restrict__ vx, const dfloat * __restrict__ yy, dfloat * __restrict__ dst, const int ncols) {
- const int row = blockIdx.x;
- const int num_blocks_per_row = ncols / QK_K;
- const int ib0 = row*num_blocks_per_row;
- const block_q5_K * x = (const block_q5_K *)vx + ib0;
- float tmp = 0; // partial sum for thread in warp
- const uint16_t kmask1 = 0x3f3f;
- const uint16_t kmask2 = 0x0f0f;
- const uint16_t kmask3 = 0xc0c0;
- const int tid = threadIdx.x/2; // 0...15
- const int ix = threadIdx.x%2;
- const int il = tid/4; // 0...3
- const int ir = tid - 4*il;// 0...3
- const int n = 2;
- const int im = il/2; // 0 or 1. 0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224
- const int in = il%2;
- const int l0 = n*(2*ir + in);
- const int q_offset = 32*im + l0;
- const int y_offset = 64*im + l0;
- const uint8_t hm1 = 1 << (2*im);
- const uint8_t hm2 = hm1 << 4;
- uint16_t aux[4];
- const uint8_t * sc = (const uint8_t *)aux;
- uint16_t q16[8];
- const uint8_t * q4 = (const uint8_t *)q16;
- for (int i = ix; i < num_blocks_per_row; i += 2) {
- const uint8_t * ql1 = x[i].qs + q_offset;
- const uint8_t * qh = x[i].qh + l0;
- const half * y1 = yy + i*QK_K + y_offset;
- const half * y2 = y1 + 128;
- const float dall = __low2float(x[i].dm);
- const float dmin = __high2float(x[i].dm);
- const uint16_t * a = (const uint16_t *)x[i].scales;
- aux[0] = a[im+0] & kmask1;
- aux[1] = a[im+2] & kmask1;
- aux[2] = ((a[im+4] >> 0) & kmask2) | ((a[im+0] & kmask3) >> 2);
- aux[3] = ((a[im+4] >> 4) & kmask2) | ((a[im+2] & kmask3) >> 2);
- float4 sum = {0.f, 0.f, 0.f, 0.f};
- float smin = 0;
- const uint16_t * q1 = (const uint16_t *)ql1;
- const uint16_t * q2 = q1 + 32;
- q16[0] = q1[0] & 0x0f0f;
- q16[1] = q1[8] & 0x0f0f;
- q16[2] = (q1[0] >> 4) & 0x0f0f;
- q16[3] = (q1[8] >> 4) & 0x0f0f;
- q16[4] = q2[0] & 0x0f0f;
- q16[5] = q2[8] & 0x0f0f;
- q16[6] = (q2[0] >> 4) & 0x0f0f;
- q16[7] = (q2[8] >> 4) & 0x0f0f;
- for (int l = 0; l < n; ++l) {
- sum.x += __half2float(y1[l+ 0]) * (q4[l +0] + (qh[l+ 0] & (hm1 << 0) ? 16 : 0))
- + __half2float(y1[l+16]) * (q4[l +2] + (qh[l+16] & (hm1 << 0) ? 16 : 0));
- sum.y += __half2float(y1[l+32]) * (q4[l +4] + (qh[l+ 0] & (hm1 << 1) ? 16 : 0))
- + __half2float(y1[l+48]) * (q4[l +6] + (qh[l+16] & (hm1 << 1) ? 16 : 0));
- sum.z += __half2float(y2[l+ 0]) * (q4[l +8] + (qh[l+ 0] & (hm2 << 0) ? 16 : 0))
- + __half2float(y2[l+16]) * (q4[l+10] + (qh[l+16] & (hm2 << 0) ? 16 : 0));
- sum.w += __half2float(y2[l+32]) * (q4[l+12] + (qh[l+ 0] & (hm2 << 1) ? 16 : 0))
- + __half2float(y2[l+48]) * (q4[l+14] + (qh[l+16] & (hm2 << 1) ? 16 : 0));
- smin += (__half2float(y1[l]) + __half2float(y1[l+16])) * sc[2] + (__half2float(y1[l+32]) + __half2float(y1[l+48])) * sc[3]
- + (__half2float(y2[l]) + __half2float(y2[l+16])) * sc[6] + (__half2float(y2[l+32]) + __half2float(y2[l+48])) * sc[7];
- }
- tmp += dall * (sum.x * sc[0] + sum.y * sc[1] + sum.z * sc[4] + sum.w * sc[5]) - dmin * smin;
- }
- // sum up partial sums and write back result
- #pragma unroll
- for (int mask = 16; mask > 0; mask >>= 1) {
- tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
- }
- if (threadIdx.x == 0) {
- dst[row] = __float2half(tmp);
- }
- }
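- // Q6_K mat-vec kernel. ql[] holds the low 4 bits and qh[] the upper 2 bits
- // of each 6-bit quant; scales[16] are signed 8-bit: y = d * sc * (q - 32).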
- static __global__ void dequantize_mul_mat_vec_q6_k(const void * __restrict__ vx, const dfloat * __restrict__ yy, dfloat * __restrict__ dst, const int ncols, int nrows) {
- static_assert(16%K_QUANTS_PER_ITERATION == 0, "16 must be divisible by K_QUANTS_PER_ITERATION");
- const int row = blockIdx.x*blockDim.y + threadIdx.y;
- if (row >= nrows) return;
- const int num_blocks_per_row = ncols / QK_K;
- const int ib0 = row*num_blocks_per_row;
- const block_q6_K * x = (const block_q6_K *)vx + ib0;
- const int tid = threadIdx.x/K_QUANTS_PER_ITERATION; // 0...31 or 0...15
- const int ix = threadIdx.x%K_QUANTS_PER_ITERATION; // 0 or 0, 1
- const int step = 16/K_QUANTS_PER_ITERATION; // 16 or 8
- const int im = tid/step; // 0 or 1. 0 computes 0..., 1 computes 128...
- const int in = tid - step*im; // 0...15 or 0...7
- #if K_QUANTS_PER_ITERATION == 1
- const int l0 = K_QUANTS_PER_ITERATION*in; // 0...15
- const int is = 0;
- #else
- const int l0 = 4 * in; // 0, 4, 8, ..., 28
- const int is = in / 4;
- #endif
- const int ql_offset = 64*im + l0;
- const int qh_offset = 32*im + l0;
- const int s_offset = 8*im + is;
- const int y_offset = 128*im + l0;
- float tmp = 0; // partial sum for thread in warp
- for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {
- const half * y = yy + i * QK_K + y_offset;
- const uint8_t * ql = x[i].ql + ql_offset;
- const uint8_t * qh = x[i].qh + qh_offset;
- const int8_t * s = x[i].scales + s_offset;
- const float d = __half2float(x[i].d);
- #if K_QUANTS_PER_ITERATION == 1
- float sum = __half2float(y[ 0]) * s[0] * d * ((int8_t)((ql[ 0] & 0xF) | ((qh[ 0] & 0x03) << 4)) - 32)
- + __half2float(y[16]) * s[1] * d * ((int8_t)((ql[16] & 0xF) | ((qh[16] & 0x03) << 4)) - 32)
- + __half2float(y[32]) * s[2] * d * ((int8_t)((ql[32] & 0xF) | ((qh[ 0] & 0x0c) << 2)) - 32)
- + __half2float(y[48]) * s[3] * d * ((int8_t)((ql[48] & 0xF) | ((qh[16] & 0x0c) << 2)) - 32)
- + __half2float(y[64]) * s[4] * d * ((int8_t)((ql[ 0] >> 4) | ((qh[ 0] & 0x30) >> 0)) - 32)
- + __half2float(y[80]) * s[5] * d * ((int8_t)((ql[16] >> 4) | ((qh[16] & 0x30) >> 0)) - 32)
- + __half2float(y[96]) * s[6] * d * ((int8_t)((ql[32] >> 4) | ((qh[ 0] & 0xc0) >> 2)) - 32)
- + __half2float(y[112]) * s[7] * d * ((int8_t)((ql[48] >> 4) | ((qh[16] & 0xc0) >> 2)) - 32);
- tmp += sum;
- #else
- float sum = 0;
- for (int l = 0; l < 4; ++l) {
- sum += __half2float(y[l+ 0]) * s[0] * d * ((int8_t)((ql[l+ 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32)
- + __half2float(y[l+32]) * s[2] * d * ((int8_t)((ql[l+32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32)
- + __half2float(y[l+64]) * s[4] * d * ((int8_t)((ql[l+ 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32)
- + __half2float(y[l+96]) * s[6] * d * ((int8_t)((ql[l+32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32);
- }
- tmp += sum;
- #endif
- }
- // sum up partial sums and write back result
- #pragma unroll
- for (int mask = 16; mask > 0; mask >>= 1) {
- tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
- }
- if (tid == 0) {
- dst[row] = __float2half(tmp);
- }
- }
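- // IQ2_XXS mat-vec kernel: groups of 8 values are looked up in the shared
- // codebook iq2xxs_grid via one byte each, signs come from a 7-bit index
- // into ksigns_iq2xs, and the top 4 bits of aux32 select the block scale.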
- static __global__ void dequantize_mul_mat_vec_iq2_xxs(const void * __restrict__ vx, const dfloat * __restrict__ yy, dfloat * __restrict__ dst, const int ncols, int nrows) {
- const int row = blockIdx.x*blockDim.y + threadIdx.y;
- if (row >= nrows) return;
- const int num_blocks_per_row = ncols / QK_K;
- const int ib0 = row*num_blocks_per_row;
- const block_iq2_xxs * x = (const block_iq2_xxs *)vx + ib0;
- float tmp = 0; // partial sum for thread in warp
- const int tid = threadIdx.x/4;
- const int ix = threadIdx.x%4;
- const int q_offset = tid * 4;
- const int y_offset = tid * 32;
- for (int i = ix; i < num_blocks_per_row; i += 4) {
- const half * y = yy + i * QK_K + y_offset;
- const uint16_t * q = x[i].qs + q_offset;
- const uint8_t * aux8 = (const uint8_t *)q;
- uint32_t aux32 = q[2] | (q[3] << 16);
- float sumi = 0;
- for (int l = 0; l < 4; ++l) {
- const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]);
- const uint8_t signs = ksigns_iq2xs[aux32 & 127];
- for (int j = 0; j < 8; ++j) {
- sumi += __half2float(y[j]) * grid[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
- }
- y += 8;
- aux32 >>= 7;
- }
- tmp += sumi * __half2float(x[i].d) * (0.5f + aux32) * 0.25f;
- }
- // sum up partial sums and write back result
- #pragma unroll
- for (int mask = 16; mask > 0; mask >>= 1) {
- tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
- }
- if (threadIdx.x == 0) {
- dst[row] = __float2half(tmp);
- }
- }
- static __global__ void dequantize_mul_mat_vec_iq2_xs(const void * __restrict__ vx, const dfloat * __restrict__ yy, dfloat * __restrict__ dst, const int ncols, int nrows) {
- const int row = blockIdx.x*blockDim.y + threadIdx.y;
- if (row >= nrows) return;
- const int num_blocks_per_row = ncols / QK_K;
- const int ib0 = row*num_blocks_per_row;
- const block_iq2_xs * x = (const block_iq2_xs *)vx + ib0;
- float tmp = 0; // partial sum for thread in warp
- const int tid = threadIdx.x/4;
- const int ix = threadIdx.x%4;
- const int q_offset = tid * 4;
- const int s_offset = tid;
- const int y_offset = tid * 32;
- for (int i = ix; i < num_blocks_per_row; i += 4) {
- const half * y = yy + i * QK_K + y_offset;
- const uint16_t * q = x[i].qs + q_offset;
- const uint8_t ls1 = x[i].scales[s_offset] & 0xf;
- const uint8_t ls2 = x[i].scales[s_offset] >> 4;
- float sumi1 = 0;
- for (int l = 0; l < 2; ++l) {
- const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q[l] & 511));
- const uint8_t signs = ksigns_iq2xs[q[l] >> 9];
- for (int j = 0; j < 8; ++j) {
- sumi1 += __half2float(y[j]) * grid[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
- }
- y += 8;
- }
- float sumi2 = 0;
- for (int l = 2; l < 4; ++l) {
- const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q[l] & 511));
- const uint8_t signs = ksigns_iq2xs[q[l] >> 9];
- for (int j = 0; j < 8; ++j) {
- sumi2 += __half2float(y[j]) * grid[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
- }
- y += 8;
- }
- const float d = __half2float(x[i].d) * 0.25f;
- tmp += d * ((0.5f + ls1) * sumi1 + (0.5f + ls2) * sumi2);
- }
- // sum up partial sums and write back result
- #pragma unroll
- for (int mask = 16; mask > 0; mask >>= 1) {
- tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
- }
- if (threadIdx.x == 0) {
- dst[row] = __float2half(tmp);
- }
- }
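- // host-side launchers: WARP_SIZE threads along x form one warp per row,
- // with GGML_CUDA_MMV_Y (or ny) rows stacked per block along y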
- static void dequantize_mul_mat_vec_q4_0_cuda(const void * vx, const dfloat * y, dfloat * dst, const int ncols, const int nrows, cudaStream_t stream) {
- const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
- // the number of rows may exceed maximum grid size in the y or z dimensions, use the x dimension instead
- const dim3 block_nums(block_num_y, 1, 1);
- const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
- dequantize_mul_mat_vec<QK4_0, QR4_0, dequantize_q4_0>
- <<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
- }
- static void dequantize_mul_mat_vec_q4_1_cuda(const void * vx, const dfloat * y, dfloat * dst, const int ncols, const int nrows, cudaStream_t stream) {
- const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
- const dim3 block_nums(block_num_y, 1, 1);
- const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
- dequantize_mul_mat_vec<QK4_1, QR4_1, dequantize_q4_1>
- <<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
- }
- static void dequantize_mul_mat_vec_q5_0_cuda(const void * vx, const dfloat * y, dfloat * dst, const int ncols, const int nrows, cudaStream_t stream) {
- const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
- const dim3 block_nums(block_num_y, 1, 1);
- const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
- dequantize_mul_mat_vec<QK5_0, QR5_0, dequantize_q5_0>
- <<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
- }
- static void dequantize_mul_mat_vec_q5_1_cuda(const void * vx, const dfloat * y, dfloat * dst, const int ncols, const int nrows, cudaStream_t stream) {
- const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
- const dim3 block_nums(block_num_y, 1, 1);
- const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
- dequantize_mul_mat_vec<QK5_1, QR5_1, dequantize_q5_1>
- <<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
- }
- static void dequantize_mul_mat_vec_q8_0_cuda(const void * vx, const dfloat * y, dfloat * dst, const int ncols, const int nrows, cudaStream_t stream) {
- const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
- const dim3 block_nums(block_num_y, 1, 1);
- const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
- dequantize_mul_mat_vec<QK8_0, QR8_0, dequantize_q8_0>
- <<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
- }
- static void dequantize_mul_mat_vec_q2_K_cuda(const void * vx, const dfloat * y, dfloat * dst, const int ncols, const int nrows, cudaStream_t stream) {
- const int ny = 2; // very slightly faster than 1 even when K_QUANTS_PER_ITERATION = 2
- const int block_num_y = (nrows + ny - 1) / ny;
- const dim3 block_nums(block_num_y, 1, 1);
- const dim3 block_dims(32, ny, 1);
- dequantize_mul_mat_vec_q2_k<<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
- }
- static void dequantize_mul_mat_vec_q3_K_cuda(const void * vx, const dfloat * y, dfloat * dst, const int ncols, const int nrows, cudaStream_t stream) {
- const int ny = 2 / K_QUANTS_PER_ITERATION;
- const int block_num_y = (nrows + ny - 1) / ny;
- const dim3 block_nums(block_num_y, 1, 1);
- const dim3 block_dims(32, ny, 1);
- dequantize_mul_mat_vec_q3_k<<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
- }
- static void dequantize_mul_mat_vec_q4_K_cuda(const void * vx, const dfloat * y, dfloat * dst, const int ncols, const int nrows, cudaStream_t stream) {
- const int ny = 2 / K_QUANTS_PER_ITERATION;
- const int block_num_y = (nrows + ny - 1) / ny;
- const dim3 block_nums(block_num_y, 1, 1);
- const dim3 block_dims(32, ny, 1);
- dequantize_mul_mat_vec_q4_k<<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
- }
- static void dequantize_mul_mat_vec_q5_K_cuda(const void * vx, const dfloat * y, dfloat * dst, const int ncols, const int nrows, cudaStream_t stream) {
- const dim3 block_dims(32, 1, 1);
- dequantize_mul_mat_vec_q5_k<<<nrows, block_dims, 0, stream>>>(vx, y, dst, ncols);
- }
- static void dequantize_mul_mat_vec_q6_K_cuda(const void * vx, const dfloat * y, dfloat * dst, const int ncols, const int nrows, cudaStream_t stream) {
- const int ny = 2 / K_QUANTS_PER_ITERATION;
- const int block_num_y = (nrows + ny - 1) / ny;
- const dim3 block_nums(block_num_y, 1, 1);
- const dim3 block_dims(32, ny, 1);
- dequantize_mul_mat_vec_q6_k<<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
- }
- static void dequantize_mul_mat_vec_iq2_xxs_cuda(const void * vx, const dfloat * y, dfloat * dst, const int ncols, const int nrows, cudaStream_t stream) {
- const dim3 block_dims(32, 1, 1);
- dequantize_mul_mat_vec_iq2_xxs<<<nrows, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
- }
- static void dequantize_mul_mat_vec_iq2_xs_cuda(const void * vx, const dfloat * y, dfloat * dst, const int ncols, const int nrows, cudaStream_t stream) {
- const dim3 block_dims(32, 1, 1);
- dequantize_mul_mat_vec_iq2_xs<<<nrows, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
- }
- // q8_1 quantization of the activations, feeding the *_q8_1 dot kernels below
- static __global__ void quantize_q8_1(const half * __restrict__ x, void * __restrict__ vy, const int kx, const int kx_padded) {
- const int ix = blockDim.x*blockIdx.x + threadIdx.x;
- if (ix >= kx_padded) {
- return;
- }
- const int iy = blockDim.y*blockIdx.y + threadIdx.y;
- const int i_padded = iy*kx_padded + ix;
- block_q8_1 * y = (block_q8_1 *) vy;
- const int ib = i_padded / QK8_1; // block index
- const int iqs = i_padded % QK8_1; // quant index
- const float xi = ix < kx ? __half2float(x[iy*kx + ix]) : 0.0f;
- float amax = fabsf(xi);
- float sum = xi;
- #pragma unroll
- for (int mask = 16; mask > 0; mask >>= 1) {
- amax = fmaxf(amax, __shfl_xor_sync(0xffffffff, amax, mask, 32));
- sum += __shfl_xor_sync(0xffffffff, sum, mask, 32);
- }
- const float d = amax / 127;
- const int8_t q = amax == 0.0f ? 0 : roundf(xi / d);
- y[ib].qs[iqs] = q;
- if (iqs > 0) {
- return;
- }
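- // only the lane with iqs == 0 survives to here: it stores the block scale d
- // and the sum of the 32 source values; the *_1 dot kernels use this sum to
- // apply per-block minima without touching the quants again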
- y[ib].ds.x = __float2half(d);
- y[ib].ds.y = __float2half(sum);
- }
- static void quantize_row_q8_1_cuda(const half * x, void * vy, const int kx, const int ky, cudaStream_t stream) {
- const int64_t kx_padded = (kx + 512 - 1) / 512 * 512;
- const int block_num_x = (kx_padded + CUDA_QUANTIZE_BLOCK_SIZE - 1) / CUDA_QUANTIZE_BLOCK_SIZE;
- const dim3 num_blocks(block_num_x, ky, 1);
- const dim3 block_size(CUDA_QUANTIZE_BLOCK_SIZE, 1, 1); // must match the block size used for block_num_x
- quantize_q8_1<<<num_blocks, block_size, 0, stream>>>(x, vy, kx, kx_padded);
- }
- #define VDR_Q4_0_Q8_1_MMVQ 2
- #define VDR_Q4_0_Q8_1_MMQ 4
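- // VDR = "vec dot ratio": how many contiguous 32-bit integers of quants each
- // thread processes per dot-product call (separate values for mat-vec and mat-mat)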
- template <int vdr> static __device__ __forceinline__ float vec_dot_q4_0_q8_1_impl(
- const int * v, const int * u, const float & d4, const half2 & ds8) {
- int sumi = 0;
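- // __dp4a(a, b, c) returns c plus the dot product of the four signed bytes of
- // a and b. The -8 offset of q4_0 is handled algebraically: sum((q-8)*u) =
- // sum(q*u) - 8*sum(u), and 8*sum(u) is recovered from the precomputed q8_1
- // block sum in ds8.y, scaled across the threads that share one block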
- #pragma unroll
- for (int i = 0; i < vdr; ++i) {
- const int vi0 = (v[i] >> 0) & 0x0F0F0F0F;
- const int vi1 = (v[i] >> 4) & 0x0F0F0F0F;
- // SIMD dot product of quantized values
- sumi = __dp4a(vi0, u[2*i+0], sumi);
- sumi = __dp4a(vi1, u[2*i+1], sumi);
- }
- const float2 ds8f = __half22float2(ds8);
- // second part effectively subtracts 8 from each quant value
- return d4 * (sumi * ds8f.x - (8*vdr/QI4_0) * ds8f.y);
- }
- #define VDR_Q4_1_Q8_1_MMVQ 2
- #define VDR_Q4_1_Q8_1_MMQ 4
- template <int vdr> static __device__ __forceinline__ float vec_dot_q4_1_q8_1_impl(
- const int * v, const int * u, const half2 & dm4, const half2 & ds8) {
- int sumi = 0;
- #pragma unroll
- for (int i = 0; i < vdr; ++i) {
- const int vi0 = (v[i] >> 0) & 0x0F0F0F0F;
- const int vi1 = (v[i] >> 4) & 0x0F0F0F0F;
- // SIMD dot product of quantized values
- sumi = __dp4a(vi0, u[2*i+0], sumi);
- sumi = __dp4a(vi1, u[2*i+1], sumi);
- }
- const float2 tmp = __half22float2(__hmul2(dm4, ds8));
- const float d4d8 = tmp.x;
- const float m4s8 = tmp.y;
- // scale second part of sum by QI8_1/(vdr * QR4_1) to compensate for multiple threads adding it
- return sumi * d4d8 + m4s8 / (QI8_1 / (vdr * QR4_1));
- }
- #define VDR_Q5_0_Q8_1_MMVQ 2
- #define VDR_Q5_0_Q8_1_MMQ 4
- template <int vdr> static __device__ __forceinline__ float vec_dot_q5_0_q8_1_impl(
- const int * vl, const int * vh, const int * u, const float & d5, const half2 & ds8) {
- int sumi = 0;
- #pragma unroll
- for (int i = 0; i < vdr; ++i) {
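- // scatter qh bits 0..3 to bit positions 4, 12, 20, 28 so each byte of vi0
- // regains its 5th quant bit; the upper-nibble half below does the same
- // with bits 16..19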
- int vi0 = (vl[i] >> 0) & 0x0F0F0F0F; // lower 4 qs bits, still need qh as 5th bits
- vi0 |= (vh[i] << 4) & 0x00000010; // 0 -> 4
- vi0 |= (vh[i] << 11) & 0x00001000; // 1 -> 12
- vi0 |= (vh[i] << 18) & 0x00100000; // 2 -> 20
- vi0 |= (vh[i] << 25) & 0x10000000; // 3 -> 28
- sumi = __dp4a(vi0, u[2*i+0], sumi); // SIMD dot product of quantized values
- int vi1 = (vl[i] >> 4) & 0x0F0F0F0F; // upper 4 qs bits, still need qh as 5th bits
- vi1 |= (vh[i] >> 12) & 0x00000010; // 16 -> 4
- vi1 |= (vh[i] >> 5) & 0x00001000; // 17 -> 12
- vi1 |= (vh[i] << 2) & 0x00100000; // 18 -> 20
- vi1 |= (vh[i] << 9) & 0x10000000; // 19 -> 28
- sumi = __dp4a(vi1, u[2*i+1], sumi); // SIMD dot product of quantized values
- }
- const float2 ds8f = __half22float2(ds8);
- // second part effectively subtracts 16 from each quant value
- return d5 * (sumi * ds8f.x - (16*vdr/QI5_0) * ds8f.y);
- }
- #define VDR_Q5_1_Q8_1_MMVQ 2
- #define VDR_Q5_1_Q8_1_MMQ 4
- template <int vdr> static __device__ __forceinline__ float vec_dot_q5_1_q8_1_impl(
- const int * vl, const int * vh, const int * u, const half2 & dm5, const half2 & ds8) {
- int sumi = 0;
- #pragma unroll
- for (int i = 0; i < vdr; ++i) {
- int vi0 = (vl[i] >> 0) & 0x0F0F0F0F; // lower 4 qs bits, still need qh as 5th bits
- vi0 |= (vh[i] << 4) & 0x00000010; // 0 -> 4
- vi0 |= (vh[i] << 11) & 0x00001000; // 1 -> 12
- vi0 |= (vh[i] << 18) & 0x00100000; // 2 -> 20
- vi0 |= (vh[i] << 25) & 0x10000000; // 3 -> 28
- sumi = __dp4a(vi0, u[2*i+0], sumi); // SIMD dot product of quantized values
- int vi1 = (vl[i] >> 4) & 0x0F0F0F0F; // upper 4 qs bits, still need qh as 5th bits
- vi1 |= (vh[i] >> 12) & 0x00000010; // 16 -> 4
- vi1 |= (vh[i] >> 5) & 0x00001000; // 17 -> 12
- vi1 |= (vh[i] << 2) & 0x00100000; // 18 -> 20
- vi1 |= (vh[i] << 9) & 0x10000000; // 19 -> 28
- sumi = __dp4a(vi1, u[2*i+1], sumi); // SIMD dot product of quantized values
- }
- const float2 tmp = __half22float2(__hmul2(dm5, ds8));
- const float d5d8 = tmp.x;
- const float m5s8 = tmp.y;
- // scale second part of sum by QI5_1 / vdr to compensate for multiple threads adding it
- return sumi*d5d8 + m5s8 / (QI5_1 / vdr);
- }
- #define VDR_Q8_0_Q8_1_MMVQ 2
- #define VDR_Q8_0_Q8_1_MMQ 8
- template <int vdr> static __device__ __forceinline__ float vec_dot_q8_0_q8_1_impl(
- const int * v, const int * u, const float & d8_0, const float & d8_1) {
- int sumi = 0;
- #pragma unroll
- for (int i = 0; i < vdr; ++i) {
- // SIMD dot product of quantized values
- sumi = __dp4a(v[i], u[i], sumi);
- }
- return d8_0*d8_1 * sumi;
- }
- template <int vdr> static __device__ __forceinline__ float vec_dot_q8_1_q8_1_impl(
- const int * v, const int * u, const half2 & dm8, const half2 & ds8) {
- int sumi = 0;
- #pragma unroll
- for (int i = 0; i < vdr; ++i) {
- // SIMD dot product of quantized values
- sumi = __dp4a(v[i], u[i], sumi);
- }
- const float2 tmp = __half22float2(__hmul2(dm8, ds8));
- const float d8d8 = tmp.x;
- const float m8s8 = tmp.y;
- // scale second part of sum by QI8_1/ vdr to compensate for multiple threads adding it
- return sumi*d8d8 + m8s8 / (QI8_1 / vdr);
- }
- #define VDR_Q2_K_Q8_1_MMVQ 1
- #define VDR_Q2_K_Q8_1_MMQ 2
- // contiguous v/x values
- static __device__ __forceinline__ float vec_dot_q2_K_q8_1_impl_mmvq(
- const int & v, const int * __restrict__ u, const uint8_t * __restrict__ scales,
- const half2 & dm2, const float * __restrict__ d8) {
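- // dm2 packs (d, dmin): the low nibble of each scale weights the 2-bit dot,
- // the high nibble is the per-sub-block minimum applied to the q8 sums,
- // giving dm2f.x*sumf_d - dm2f.y*sumf_m below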
- float sumf_d = 0.0f;
- float sumf_m = 0.0f;
- #pragma unroll
- for (int i = 0; i < QR2_K; ++i) {
- const int sc = scales[2*i];
- const int vi = (v >> (2*i)) & 0x03030303;
- sumf_d += d8[i] * (__dp4a(vi, u[i], 0) * (sc & 0xF)); // SIMD dot product
- // fill int with 4x m
- int m = sc >> 4;
- m |= m << 8;
- m |= m << 16;
- sumf_m += d8[i] * __dp4a(m, u[i], 0); // multiply constant q2_K part with sum of q8_1 values
- }
- const float2 dm2f = __half22float2(dm2);
- return dm2f.x*sumf_d - dm2f.y*sumf_m;
- }
- static __device__ __forceinline__ float vec_dot_q2_K_q8_1_impl_mmq(
- const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ scales,
- const half2 & dm2, const float & d8) {
- int sumi_d = 0;
- int sumi_m = 0;
- #pragma unroll
- for (int i0 = 0; i0 < QI8_1; i0 += QI8_1/2) {
- int sumi_d_sc = 0;
- const int sc = scales[i0 / (QI8_1/2)];
- // fill int with 4x m
- int m = sc >> 4;
- m |= m << 8;
- m |= m << 16;
- #pragma unroll
- for (int i = i0; i < i0 + QI8_1/2; ++i) {
- sumi_d_sc = __dp4a(v[i], u[i], sumi_d_sc); // SIMD dot product
- sumi_m = __dp4a(m, u[i], sumi_m); // multiply sum of q8_1 values with m
- }
- sumi_d += sumi_d_sc * (sc & 0xF);
- }
- const float2 dm2f = __half22float2(dm2);
- return d8 * (dm2f.x*sumi_d - dm2f.y*sumi_m);
- }
- #define VDR_Q3_K_Q8_1_MMVQ 1
- #define VDR_Q3_K_Q8_1_MMQ 2
- // contiguous v/x values
- static __device__ __forceinline__ float vec_dot_q3_K_q8_1_impl_mmvq(
- const int & vl, const int & vh, const int * __restrict__ u, const uint8_t * __restrict__ scales,
- const int & scale_offset, const float & d3, const float * __restrict__ d8) {
- float sumf = 0.0f;
- #pragma unroll
- for (int i = 0; i < QR3_K; ++i) {
- const int isc = scale_offset + 2*i;
- const int isc_low = isc % (QK_K/32);
- const int sc_shift_low = 4 * (isc / (QK_K/32));
- const int sc_low = (scales[isc_low] >> sc_shift_low) & 0xF;
- const int isc_high = isc % (QK_K/64);
- const int sc_shift_high = 2 * (isc / (QK_K/64));
- const int sc_high = ((scales[(QK_K/32) + isc_high] >> sc_shift_high) & 3) << 4;
- const int sc = (sc_low | sc_high) - 32;
- const int vil = (vl >> (2*i)) & 0x03030303;
- const int vih = ((vh >> i) << 2) & 0x04040404;
- const int vi = __vsubss4(vil, vih);
- sumf += d8[i] * (__dp4a(vi, u[i], 0) * sc); // SIMD dot product
- }
- return d3 * sumf;
- }
- static __device__ __forceinline__ float vec_dot_q3_K_q8_1_impl_mmq(
- const int * __restrict__ v, const int * __restrict__ u, const int8_t * __restrict__ scales,
- const float & d3, const float & d8) {
- int sumi = 0;
- #pragma unroll
- for (int i0 = 0; i0 < QR3_K*VDR_Q3_K_Q8_1_MMQ; i0 += QI8_1/2) {
- int sumi_sc = 0;
- for (int i = i0; i < i0 + QI8_1/2; ++i) {
- sumi_sc = __dp4a(v[i], u[i], sumi_sc); // SIMD dot product
- }
- sumi += sumi_sc * scales[i0 / (QI8_1/2)];
- }
- return d3*d8 * sumi;
- }
- #define VDR_Q4_K_Q8_1_MMVQ 2
- #define VDR_Q4_K_Q8_1_MMQ 8
- // contiguous v/x values
- static __device__ __forceinline__ float vec_dot_q4_K_q8_1_impl_vmmq(
- const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ sc,
- const uint8_t * __restrict__ m, const half2 & dm4, const float * __restrict__ d8) {
- float sumf_d = 0.0f;
- float sumf_m = 0.0f;
- #pragma unroll
- for (int i = 0; i < QR4_K; ++i) {
- const int v0i = (v[0] >> (4*i)) & 0x0F0F0F0F;
- const int v1i = (v[1] >> (4*i)) & 0x0F0F0F0F;
- const int dot1 = __dp4a(v1i, u[2*i+1], __dp4a(v0i, u[2*i+0], 0)); // SIMD dot product
- const int dot2 = __dp4a(0x01010101, u[2*i+1], __dp4a(0x01010101, u[2*i+0], 0)); // sum of u
- sumf_d += d8[i] * (dot1 * sc[i]);
- sumf_m += d8[i] * (dot2 * m[i]); // multiply constant part of q4_K with sum of q8_1 values
- }
- const float2 dm4f = __half22float2(dm4);
- return dm4f.x*sumf_d - dm4f.y*sumf_m;
- }
- static __device__ __forceinline__ float vec_dot_q4_K_q8_1_impl_mmq(
- const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ sc,
- const uint8_t * __restrict__ m, const half2 & dm4, const half2 * __restrict__ ds8) {
- float sumf_d = 0.0f;
- float sumf_m = 0.0f;
- #pragma unroll
- for (int i = 0; i < QR4_K*VDR_Q4_K_Q8_1_MMQ/QI8_1; ++i) {
- int sumi_d = 0;
- #pragma unroll
- for (int j = 0; j < QI8_1; ++j) {
- sumi_d = __dp4a((v[j] >> (4*i)) & 0x0F0F0F0F, u[i*QI8_1 + j], sumi_d); // SIMD dot product
- }
- const float2 ds8f = __half22float2(ds8[i]);
- sumf_d += ds8f.x * (sc[i] * sumi_d);
- sumf_m += ds8f.y * m[i]; // sum of q8_1 block * q4_K min val
- }
- const float2 dm4f = __half22float2(dm4);
- return dm4f.x*sumf_d - dm4f.y*sumf_m;
- }
- #define VDR_Q5_K_Q8_1_MMVQ 2
- #define VDR_Q5_K_Q8_1_MMQ 8
- static __device__ __forceinline__ float vec_dot_q5_K_q8_1_impl_vmmq(
- const int * __restrict__ vl, const int * __restrict__ vh, const int * __restrict__ u, const uint8_t * __restrict__ sc,
- const uint8_t * __restrict__ m, const half2 & dm5, const float * __restrict__ d8) {
- float sumf_d = 0.0f;
- float sumf_m = 0.0f;
- #pragma unroll
- for (int i = 0; i < QR5_K; ++i) {
- const int vl0i = (vl[0] >> (4*i)) & 0x0F0F0F0F;
- const int vl1i = (vl[1] >> (4*i)) & 0x0F0F0F0F;
- const int vh0i = ((vh[0] >> i) << 4) & 0x10101010;
- const int vh1i = ((vh[1] >> i) << 4) & 0x10101010;
- const int v0i = vl0i | vh0i;
- const int v1i = vl1i | vh1i;
- const int dot1 = __dp4a(v0i, u[2*i+0], __dp4a(v1i, u[2*i+1], 0)); // SIMD dot product
- const int dot2 = __dp4a(0x01010101, u[2*i+0], __dp4a(0x01010101, u[2*i+1], 0)); // sum of u
- sumf_d += d8[i] * (dot1 * sc[i]);
- sumf_m += d8[i] * (dot2 * m[i]);
- }
- const float2 dm5f = __half22float2(dm5);
- return dm5f.x*sumf_d - dm5f.y*sumf_m;
- }
- static __device__ __forceinline__ float vec_dot_q5_K_q8_1_impl_mmq(
- const int * __restrict__ v, const int * __restrict__ u, const uint8_t * __restrict__ sc,
- const uint8_t * __restrict__ m, const half2 & dm4, const half2 * __restrict__ ds8) {
- float sumf_d = 0.0f;
- float sumf_m = 0.0f;
- #pragma unroll
- for (int i = 0; i < QR5_K*VDR_Q5_K_Q8_1_MMQ/QI8_1; ++i) {
- int sumi_d = 0;
- #pragma unroll
- for (int j = 0; j < QI8_1; ++j) {
- sumi_d = __dp4a(v[i*QI8_1 + j], u[i*QI8_1 + j], sumi_d); // SIMD dot product
- }
- const float2 ds8f = __half22float2(ds8[i]);
- sumf_d += ds8f.x * (sc[i] * sumi_d);
- sumf_m += ds8f.y * m[i]; // sum of q8_1 block * q5_K min val
- }
- const float2 dm4f = __half22float2(dm4);
- return dm4f.x*sumf_d - dm4f.y*sumf_m;
- }
- #define VDR_Q6_K_Q8_1_MMVQ 1
- #define VDR_Q6_K_Q8_1_MMQ 8
- // contiguous v/x values
- static __device__ __forceinline__ float vec_dot_q6_K_q8_1_impl_mmvq(
- const int & vl, const int & vh, const int * __restrict__ u, const int8_t * __restrict__ scales,
- const float & d, const float * __restrict__ d8) {
- float sumf = 0.0f;
- #pragma unroll
- for (int i = 0; i < QR6_K; ++i) {
- const int sc = scales[4*i];
- const int vil = (vl >> (4*i)) & 0x0F0F0F0F;
- const int vih = ((vh >> (4*i)) << 4) & 0x30303030;
- const int vi = __vsubss4((vil | vih), 0x20202020); // vi = (vil | vih) - 32
- sumf += d8[i] * (__dp4a(vi, u[i], 0) * sc); // SIMD dot product
- }
- return d*sumf;
- }
- static __device__ __forceinline__ float vec_dot_q6_K_q8_1_impl_mmq(
- const int * __restrict__ v, const int * __restrict__ u, const int8_t * __restrict__ sc,
- const float & d6, const float * __restrict__ d8) {
- float sumf_d = 0.0f;
- #pragma unroll
- for (int i0 = 0; i0 < VDR_Q6_K_Q8_1_MMQ; i0 += 4) {
- int2 sumi_d = {0, 0}; // 2 q6_K scales per q8_1 scale
- #pragma unroll
- for (int i = i0; i < i0 + 2; ++i) {
- sumi_d.x = __dp4a(v[2*i+0], u[2*i+0], sumi_d.x); // SIMD dot product
- sumi_d.x = __dp4a(v[2*i+1], u[2*i+1], sumi_d.x); // SIMD dot product
- sumi_d.y = __dp4a(v[2*i+4], u[2*i+4], sumi_d.y); // SIMD dot product
- sumi_d.y = __dp4a(v[2*i+5], u[2*i+5], sumi_d.y); // SIMD dot product
- }
- sumf_d += d8[i0/4] * (sc[i0/2+0]*sumi_d.x + sc[i0/2+1]*sumi_d.y);
- }
- return d6 * sumf_d;
- }
- static __device__ __forceinline__ float vec_dot_q4_0_q8_1(
- const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
- const block_q4_0 * bq4_0 = (const block_q4_0 *) vbq;
- int v[VDR_Q4_0_Q8_1_MMVQ];
- int u[2*VDR_Q4_0_Q8_1_MMVQ];
- #pragma unroll
- for (int i = 0; i < VDR_Q4_0_Q8_1_MMVQ; ++i) {
- v[i] = get_int_from_uint8(bq4_0->qs, iqs + i);
- u[2*i+0] = get_int_from_int8_aligned(bq8_1->qs, iqs + i);
- u[2*i+1] = get_int_from_int8_aligned(bq8_1->qs, iqs + i + QI4_0);
- }
- return vec_dot_q4_0_q8_1_impl<VDR_Q4_0_Q8_1_MMVQ>(v, u, __half2float(bq4_0->d), bq8_1->ds);
- }
- template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q4_0(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
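- // the extra "+ mmq_y" elements give each tile row a stride of WARP_SIZE+1,
- // the usual padding trick to keep rows in different shared-memory banks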
- __shared__ int tile_x_qs[mmq_y * (WARP_SIZE) + mmq_y];
- __shared__ float tile_x_d[mmq_y * (WARP_SIZE/QI4_0) + mmq_y/QI4_0];
- *x_ql = tile_x_qs;
- *x_dm = (half2 *) tile_x_d;
- }
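- // load_tiles_*: warp i_offset copies rows i_offset, i_offset + nwarps, ...
- // of the tile into shared memory; with need_check the row index is clamped
- // to i_max so boundary-tile reads stay in bounds (duplicates are harmless)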
- template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q4_0(
- const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
- int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
- const int kbx = k / QI4_0;
- const int kqsx = k % QI4_0;
- const block_q4_0 * bx0 = (const block_q4_0 *) vx;
- float * x_dmf = (float *) x_dm;
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
- int i = i0 + i_offset;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q4_0 * bxi = bx0 + i*blocks_per_row + kbx;
- x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8(bxi->qs, kqsx);
- // x_dmf[i * (WARP_SIZE/QI4_0) + i / QI4_0 + kbx] = bxi->d;
- }
- const int blocks_per_tile_x_row = WARP_SIZE / QI4_0;
- const int kbxd = k % blocks_per_tile_x_row;
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI4_0) {
- int i = i0 + i_offset * QI4_0 + k / blocks_per_tile_x_row;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q4_0 * bxi = bx0 + i*blocks_per_row + kbxd;
- x_dmf[i * (WARP_SIZE/QI4_0) + i / QI4_0 + kbxd] = __half2float(bxi->d);
- }
- }
- static __device__ __forceinline__ float vec_dot_q4_0_q8_1_mul_mat(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
- const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
- (void)x_qh; (void)x_sc;
- const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2));
- const float * x_dmf = (const float *) x_dm;
- int u[2*VDR_Q4_0_Q8_1_MMQ];
- #pragma unroll
- for (int l = 0; l < VDR_Q4_0_Q8_1_MMQ; ++l) {
- u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE];
- u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI4_0) % WARP_SIZE];
- }
- return vec_dot_q4_0_q8_1_impl<VDR_Q4_0_Q8_1_MMQ>
- (&x_ql[i * (WARP_SIZE + 1) + k], u, x_dmf[i * (WARP_SIZE/QI4_0) + i/QI4_0 + k/QI4_0],
- y_ds[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]);
- }
- static __device__ __forceinline__ float vec_dot_q4_1_q8_1(
- const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
- const block_q4_1 * bq4_1 = (const block_q4_1 *) vbq;
- int v[VDR_Q4_1_Q8_1_MMVQ];
- int u[2*VDR_Q4_1_Q8_1_MMVQ];
- #pragma unroll
- for (int i = 0; i < VDR_Q4_1_Q8_1_MMVQ; ++i) {
- v[i] = get_int_from_uint8_aligned(bq4_1->qs, iqs + i);
- u[2*i+0] = get_int_from_int8_aligned(bq8_1->qs, iqs + i);
- u[2*i+1] = get_int_from_int8_aligned(bq8_1->qs, iqs + i + QI4_1);
- }
- return vec_dot_q4_1_q8_1_impl<VDR_Q4_1_Q8_1_MMVQ>(v, u, bq4_1->dm, bq8_1->ds);
- }
- template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q4_1(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
- __shared__ int tile_x_qs[mmq_y * (WARP_SIZE) + mmq_y];
- __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI4_1) + mmq_y/QI4_1];
- *x_ql = tile_x_qs;
- *x_dm = tile_x_dm;
- }
- template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q4_1(
- const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
- int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
- const int kbx = k / QI4_1;
- const int kqsx = k % QI4_1;
- const block_q4_1 * bx0 = (const block_q4_1 *) vx;
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
- int i = i0 + i_offset;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q4_1 * bxi = bx0 + i*blocks_per_row + kbx;
- x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx);
- }
- const int blocks_per_tile_x_row = WARP_SIZE / QI4_1;
- const int kbxd = k % blocks_per_tile_x_row;
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI4_1) {
- int i = i0 + i_offset * QI4_1 + k / blocks_per_tile_x_row;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q4_1 * bxi = bx0 + i*blocks_per_row + kbxd;
- x_dm[i * (WARP_SIZE/QI4_1) + i / QI4_1 + kbxd] = bxi->dm;
- }
- }
- static __device__ __forceinline__ float vec_dot_q4_1_q8_1_mul_mat(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
- const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
- const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2));
- int u[2*VDR_Q4_1_Q8_1_MMQ];
- #pragma unroll
- for (int l = 0; l < VDR_Q4_1_Q8_1_MMQ; ++l) {
- u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE];
- u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI4_1) % WARP_SIZE];
- }
- return vec_dot_q4_1_q8_1_impl<VDR_Q4_1_Q8_1_MMQ>
- (&x_ql[i * (WARP_SIZE + 1) + k], u, x_dm[i * (WARP_SIZE/QI4_1) + i/QI4_1 + k/QI4_1],
- y_ds[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]);
- }
- static __device__ __forceinline__ float vec_dot_q5_0_q8_1(
- const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
- const block_q5_0 * bq5_0 = (const block_q5_0 *) vbq;
- int vl[VDR_Q5_0_Q8_1_MMVQ];
- int vh[VDR_Q5_0_Q8_1_MMVQ];
- int u[2*VDR_Q5_0_Q8_1_MMVQ];
- #pragma unroll
- for (int i = 0; i < VDR_Q5_0_Q8_1_MMVQ; ++i) {
- vl[i] = get_int_from_uint8(bq5_0->qs, iqs + i);
- vh[i] = get_int_from_uint8(bq5_0->qh, 0) >> (4 * (iqs + i));
- u[2*i+0] = get_int_from_int8_aligned(bq8_1->qs, iqs + i);
- u[2*i+1] = get_int_from_int8_aligned(bq8_1->qs, iqs + i + QI5_0);
- }
- return vec_dot_q5_0_q8_1_impl<VDR_Q5_0_Q8_1_MMVQ>(vl, vh, u, __half2float(bq5_0->d), bq8_1->ds);
- }
- template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q5_0(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
- __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE) + mmq_y];
- __shared__ float tile_x_d[mmq_y * (WARP_SIZE/QI5_0) + mmq_y/QI5_0];
- *x_ql = tile_x_ql;
- *x_dm = (half2 *) tile_x_d;
- }
- template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q5_0(
- const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
- int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
- const int kbx = k / QI5_0;
- const int kqsx = k % QI5_0;
- const block_q5_0 * bx0 = (const block_q5_0 *) vx;
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
- int i = i0 + i_offset;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q5_0 * bxi = bx0 + i*blocks_per_row + kbx;
- const int ql = get_int_from_uint8(bxi->qs, kqsx);
- const int qh = get_int_from_uint8(bxi->qh, 0) >> (4 * (k % QI5_0));
- int qs0 = (ql >> 0) & 0x0F0F0F0F;
- qs0 |= (qh << 4) & 0x00000010; // 0 -> 4
- qs0 |= (qh << 11) & 0x00001000; // 1 -> 12
- qs0 |= (qh << 18) & 0x00100000; // 2 -> 20
- qs0 |= (qh << 25) & 0x10000000; // 3 -> 28
- qs0 = __vsubss4(qs0, 0x10101010); // subtract 16
- x_ql[i * (2*WARP_SIZE + 1) + 2*k+0] = qs0;
- int qs1 = (ql >> 4) & 0x0F0F0F0F;
- qs1 |= (qh >> 12) & 0x00000010; // 16 -> 4
- qs1 |= (qh >> 5) & 0x00001000; // 17 -> 12
- qs1 |= (qh << 2) & 0x00100000; // 18 -> 20
- qs1 |= (qh << 9) & 0x10000000; // 19 -> 28
- qs1 = __vsubss4(qs1, 0x10101010); // subtract 16
- x_ql[i * (2*WARP_SIZE + 1) + 2*k+1] = qs1;
- }
- const int blocks_per_tile_x_row = WARP_SIZE / QI5_0;
- const int kbxd = k % blocks_per_tile_x_row;
- float * x_dmf = (float *) x_dm;
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI5_0) {
- int i = i0 + i_offset * QI5_0 + k / blocks_per_tile_x_row;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q5_0 * bxi = bx0 + i*blocks_per_row + kbxd;
- x_dmf[i * (WARP_SIZE/QI5_0) + i / QI5_0 + kbxd] = __half2float(bxi->d);
- }
- }
- static __device__ __forceinline__ float vec_dot_q5_0_q8_1_mul_mat(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
- const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
- const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2));
- const int index_bx = i * (WARP_SIZE/QI5_0) + i/QI5_0 + k/QI5_0;
- const float * x_dmf = (const float *) x_dm;
- const float * y_df = (const float *) y_ds;
- int u[2*VDR_Q5_0_Q8_1_MMQ];
- #pragma unroll
- for (int l = 0; l < VDR_Q5_0_Q8_1_MMQ; ++l) {
- u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE];
- u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI5_0) % WARP_SIZE];
- }
- return vec_dot_q8_0_q8_1_impl<QR5_0*VDR_Q5_0_Q8_1_MMQ>
- (&x_ql[i * (2*WARP_SIZE + 1) + 2 * k], u, x_dmf[index_bx], y_df[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]);
- }
- static __device__ __forceinline__ float vec_dot_q5_1_q8_1(
- const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
- const block_q5_1 * bq5_1 = (const block_q5_1 *) vbq;
- int vl[VDR_Q5_1_Q8_1_MMVQ];
- int vh[VDR_Q5_1_Q8_1_MMVQ];
- int u[2*VDR_Q5_1_Q8_1_MMVQ];
- #pragma unroll
- for (int i = 0; i < VDR_Q5_1_Q8_1_MMVQ; ++i) {
- vl[i] = get_int_from_uint8_aligned(bq5_1->qs, iqs + i);
- vh[i] = get_int_from_uint8_aligned(bq5_1->qh, 0) >> (4 * (iqs + i));
- u[2*i+0] = get_int_from_int8_aligned(bq8_1->qs, iqs + i);
- u[2*i+1] = get_int_from_int8_aligned(bq8_1->qs, iqs + i + QI5_1);
- }
- return vec_dot_q5_1_q8_1_impl<VDR_Q5_1_Q8_1_MMVQ>(vl, vh, u, bq5_1->dm, bq8_1->ds);
- }
- template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q5_1(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
- __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE) + mmq_y];
- __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI5_1) + mmq_y/QI5_1];
- *x_ql = tile_x_ql;
- *x_dm = tile_x_dm;
- }
- template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q5_1(
- const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
- int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
- const int kbx = k / QI5_1;
- const int kqsx = k % QI5_1;
- const block_q5_1 * bx0 = (const block_q5_1 *) vx;
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
- int i = i0 + i_offset;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q5_1 * bxi = bx0 + i*blocks_per_row + kbx;
- const int ql = get_int_from_uint8_aligned(bxi->qs, kqsx);
- const int qh = get_int_from_uint8_aligned(bxi->qh, 0) >> (4 * (k % QI5_1));
- int qs0 = (ql >> 0) & 0x0F0F0F0F;
- qs0 |= (qh << 4) & 0x00000010; // 0 -> 4
- qs0 |= (qh << 11) & 0x00001000; // 1 -> 12
- qs0 |= (qh << 18) & 0x00100000; // 2 -> 20
- qs0 |= (qh << 25) & 0x10000000; // 3 -> 28
- x_ql[i * (2*WARP_SIZE + 1) + 2*k+0] = qs0;
- int qs1 = (ql >> 4) & 0x0F0F0F0F;
- qs1 |= (qh >> 12) & 0x00000010; // 16 -> 4
- qs1 |= (qh >> 5) & 0x00001000; // 17 -> 12
- qs1 |= (qh << 2) & 0x00100000; // 18 -> 20
- qs1 |= (qh << 9) & 0x10000000; // 19 -> 28
- x_ql[i * (2*WARP_SIZE + 1) + 2*k+1] = qs1;
- }
- const int blocks_per_tile_x_row = WARP_SIZE / QI5_1;
- const int kbxd = k % blocks_per_tile_x_row;
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI5_1) {
- int i = i0 + i_offset * QI5_1 + k / blocks_per_tile_x_row;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q5_1 * bxi = bx0 + i*blocks_per_row + kbxd;
- x_dm[i * (WARP_SIZE/QI5_1) + i / QI5_1 + kbxd] = bxi->dm;
- }
- }
- static __device__ __forceinline__ float vec_dot_q5_1_q8_1_mul_mat(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
- const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
- const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2));
- const int index_bx = i * (WARP_SIZE/QI5_1) + i/QI5_1 + k/QI5_1;
- int u[2*VDR_Q5_1_Q8_1_MMQ];
- #pragma unroll
- for (int l = 0; l < VDR_Q5_1_Q8_1_MMQ; ++l) {
- u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE];
- u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI5_1) % WARP_SIZE];
- }
- return vec_dot_q8_1_q8_1_impl<QR5_1*VDR_Q5_1_Q8_1_MMQ>
- (&x_ql[i * (2*WARP_SIZE + 1) + 2 * k], u, x_dm[index_bx], y_ds[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]);
- }
- static __device__ __forceinline__ float vec_dot_q8_0_q8_1(
- const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
- const block_q8_0 * bq8_0 = (const block_q8_0 *) vbq;
- int v[VDR_Q8_0_Q8_1_MMVQ];
- int u[VDR_Q8_0_Q8_1_MMVQ];
- #pragma unroll
- for (int i = 0; i < VDR_Q8_0_Q8_1_MMVQ; ++i) {
- v[i] = get_int_from_int8(bq8_0->qs, iqs + i);
- u[i] = get_int_from_int8_aligned(bq8_1->qs, iqs + i);
- }
- return vec_dot_q8_0_q8_1_impl<VDR_Q8_0_Q8_1_MMVQ>(v, u, __half2float(bq8_0->d), __low2float(bq8_1->ds));
- }
- template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q8_0(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
- __shared__ int tile_x_qs[mmq_y * (WARP_SIZE) + mmq_y];
- __shared__ float tile_x_d[mmq_y * (WARP_SIZE/QI8_0) + mmq_y/QI8_0];
- *x_ql = tile_x_qs;
- *x_dm = (half2 *) tile_x_d;
- }
- template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q8_0(
- const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
- int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
- const int kbx = k / QI8_0;
- const int kqsx = k % QI8_0;
- float * x_dmf = (float *) x_dm;
- const block_q8_0 * bx0 = (const block_q8_0 *) vx;
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
- int i = i0 + i_offset;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q8_0 * bxi = bx0 + i*blocks_per_row + kbx;
- x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_int8(bxi->qs, kqsx);
- }
- const int blocks_per_tile_x_row = WARP_SIZE / QI8_0;
- const int kbxd = k % blocks_per_tile_x_row;
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI8_0) {
- int i = i0 + i_offset * QI8_0 + k / blocks_per_tile_x_row;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q8_0 * bxi = bx0 + i*blocks_per_row + kbxd;
- x_dmf[i * (WARP_SIZE/QI8_0) + i / QI8_0 + kbxd] = __half2float(bxi->d);
- }
- }
- static __device__ __forceinline__ float vec_dot_q8_0_q8_1_mul_mat(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
- const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
- const float * x_dmf = (const float *) x_dm;
- const float * y_df = (const float *) y_ds;
- return vec_dot_q8_0_q8_1_impl<VDR_Q8_0_Q8_1_MMQ>
- (&x_ql[i * (WARP_SIZE + 1) + k], &y_qs[j * WARP_SIZE + k], x_dmf[i * (WARP_SIZE/QI8_0) + i/QI8_0 + k/QI8_0],
- y_df[j * (WARP_SIZE/QI8_1) + k/QI8_1]);
- }
- static __device__ __forceinline__ float vec_dot_q2_K_q8_1(
- const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
- const block_q2_K * bq2_K = (const block_q2_K *) vbq;
- const int bq8_offset = QR2_K * (iqs / QI8_1);
- const int scale_offset = iqs - iqs % QI8_1 + (iqs % QI8_1) / (QI8_1/2);
- const uint8_t * scales = bq2_K->scales + scale_offset;
- const int v = get_int_from_uint8_aligned(bq2_K->qs, iqs);
- int u[QR2_K];
- float d8[QR2_K];
- #pragma unroll
- for (int i = 0; i < QR2_K; ++ i) {
- u[i] = get_int_from_int8_aligned(bq8_1[bq8_offset + i].qs, iqs % QI8_1);
- d8[i] = __low2float(bq8_1[bq8_offset + i].ds);
- }
- return vec_dot_q2_K_q8_1_impl_mmvq(v, u, scales, bq2_K->dm, d8);
- }
- template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q2_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
- __shared__ int tile_x_ql[mmq_y * (WARP_SIZE) + mmq_y];
- __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI2_K) + mmq_y/QI2_K];
- __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/4) + mmq_y/4];
- *x_ql = tile_x_ql;
- *x_dm = tile_x_dm;
- *x_sc = tile_x_sc;
- }
- template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q2_K(
- const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
- int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
- const int kbx = k / QI2_K;
- const int kqsx = k % QI2_K;
- const block_q2_K * bx0 = (const block_q2_K *) vx;
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
- int i = i0 + i_offset;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q2_K * bxi = bx0 + i*blocks_per_row + kbx;
- x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx);
- }
- const int blocks_per_tile_x_row = WARP_SIZE / QI2_K;
- const int kbxd = k % blocks_per_tile_x_row;
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI2_K) {
- int i = (i0 + i_offset * QI2_K + k / blocks_per_tile_x_row) % mmq_y;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q2_K * bxi = bx0 + i*blocks_per_row + kbxd;
- x_dm[i * (WARP_SIZE/QI2_K) + i / QI2_K + kbxd] = bxi->dm;
- }
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 4) {
- int i = i0 + i_offset * 4 + k / (WARP_SIZE/4);
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q2_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/4)) / (QI2_K/4);
- x_sc[i * (WARP_SIZE/4) + i / 4 + k % (WARP_SIZE/4)] = get_int_from_uint8_aligned(bxi->scales, k % (QI2_K/4));
- }
- }
- static __device__ __forceinline__ float vec_dot_q2_K_q8_1_mul_mat(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
- const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
- const int kbx = k / QI2_K;
- const int ky = (k % QI2_K) * QR2_K;
- const float * y_df = (const float *) y_ds;
- int v[QR2_K*VDR_Q2_K_Q8_1_MMQ];
- const int kqsx = i * (WARP_SIZE + 1) + kbx*QI2_K + (QI2_K/2) * (ky/(2*QI2_K)) + ky % (QI2_K/2);
- const int shift = 2 * ((ky % (2*QI2_K)) / (QI2_K/2));
- #pragma unroll
- for (int l = 0; l < QR2_K*VDR_Q2_K_Q8_1_MMQ; ++l) {
- v[l] = (x_ql[kqsx + l] >> shift) & 0x03030303;
- }
- const uint8_t * scales = ((const uint8_t *) &x_sc[i * (WARP_SIZE/4) + i/4 + kbx*4]) + ky/4;
- const int index_y = j * WARP_SIZE + (QR2_K*k) % WARP_SIZE;
- return vec_dot_q2_K_q8_1_impl_mmq(v, &y_qs[index_y], scales, x_dm[i * (WARP_SIZE/QI2_K) + i/QI2_K + kbx], y_df[index_y/QI8_1]);
- }
- static __device__ __forceinline__ float vec_dot_q3_K_q8_1(
- const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
- const block_q3_K * bq3_K = (const block_q3_K *) vbq;
- const int bq8_offset = QR3_K * (iqs / (QI3_K/2));
- const int scale_offset = iqs - iqs % QI8_1 + (iqs % QI8_1) / (QI8_1/2);
- const float d = __half2float(bq3_K->d);
- const int vl = get_int_from_uint8(bq3_K->qs, iqs);
- // invert the mask with ~ so that a 0/1 results in 4/0 being subtracted
- const int vh = ~get_int_from_uint8(bq3_K->hmask, iqs % (QI3_K/2)) >> bq8_offset;
- int u[QR3_K];
- float d8[QR3_K];
- #pragma unroll
- for (int i = 0; i < QR3_K; ++i) {
- u[i] = get_int_from_int8_aligned(bq8_1[bq8_offset + i].qs, iqs % QI8_1);
- d8[i] = __low2float(bq8_1[bq8_offset + i].ds);
- }
- return vec_dot_q3_K_q8_1_impl_mmvq(vl, vh, u, bq3_K->scales, scale_offset, d, d8);
- }
- template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q3_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
- __shared__ int tile_x_ql[mmq_y * (WARP_SIZE) + mmq_y];
- __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI3_K) + mmq_y/QI3_K];
- __shared__ int tile_x_qh[mmq_y * (WARP_SIZE/2) + mmq_y/2];
- __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/4) + mmq_y/4];
- *x_ql = tile_x_ql;
- *x_dm = tile_x_dm;
- *x_qh = tile_x_qh;
- *x_sc = tile_x_sc;
- }
- template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q3_K(
- const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
- int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
- const int kbx = k / QI3_K;
- const int kqsx = k % QI3_K;
- const block_q3_K * bx0 = (const block_q3_K *) vx;
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
- int i = i0 + i_offset;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q3_K * bxi = bx0 + i*blocks_per_row + kbx;
- x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8(bxi->qs, kqsx);
- }
- const int blocks_per_tile_x_row = WARP_SIZE / QI3_K;
- const int kbxd = k % blocks_per_tile_x_row;
- float * x_dmf = (float *) x_dm;
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI3_K) {
- int i = (i0 + i_offset * QI3_K + k / blocks_per_tile_x_row) % mmq_y;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q3_K * bxi = bx0 + i*blocks_per_row + kbxd;
- x_dmf[i * (WARP_SIZE/QI3_K) + i / QI3_K + kbxd] = __half2float(bxi->d);
- }
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 2) {
- int i = i0 + i_offset * 2 + k / (WARP_SIZE/2);
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q3_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/2)) / (QI3_K/2);
- // invert the mask with ~ so that a 0/1 results in 4/0 being subtracted
- x_qh[i * (WARP_SIZE/2) + i / 2 + k % (WARP_SIZE/2)] = ~get_int_from_uint8(bxi->hmask, k % (QI3_K/2));
- }
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 4) {
- int i = i0 + i_offset * 4 + k / (WARP_SIZE/4);
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q3_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/4)) / (QI3_K/4);
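- // rebuild the 6-bit q3_K scales: low 4 bits from the first 8 scale bytes,
- // high 2 bits from the last 4, then subtract the bias of 32 via __vsubss4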
- const int ksc = k % (QI3_K/4);
- const int ksc_low = ksc % (QI3_K/8);
- const int shift_low = 4 * (ksc / (QI3_K/8));
- const int sc_low = (get_int_from_uint8(bxi->scales, ksc_low) >> shift_low) & 0x0F0F0F0F;
- const int ksc_high = QI3_K/8;
- const int shift_high = 2 * ksc;
- const int sc_high = ((get_int_from_uint8(bxi->scales, ksc_high) >> shift_high) << 4) & 0x30303030;
- const int sc = __vsubss4(sc_low | sc_high, 0x20202020);
- x_sc[i * (WARP_SIZE/4) + i / 4 + k % (WARP_SIZE/4)] = sc;
- }
- }
- static __device__ __forceinline__ float vec_dot_q3_K_q8_1_mul_mat(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
- const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
- const int kbx = k / QI3_K;
- const int ky = (k % QI3_K) * QR3_K;
- const float * x_dmf = (const float *) x_dm;
- const float * y_df = (const float *) y_ds;
- const int8_t * scales = ((const int8_t *) (x_sc + i * (WARP_SIZE/4) + i/4 + kbx*4)) + ky/4;
- int v[QR3_K*VDR_Q3_K_Q8_1_MMQ];
- #pragma unroll
- for (int l = 0; l < QR3_K*VDR_Q3_K_Q8_1_MMQ; ++l) {
- const int kqsx = i * (WARP_SIZE + 1) + kbx*QI3_K + (QI3_K/2) * (ky/(2*QI3_K)) + ky % (QI3_K/2);
- const int shift = 2 * ((ky % 32) / 8);
- const int vll = (x_ql[kqsx + l] >> shift) & 0x03030303;
- const int vh = x_qh[i * (WARP_SIZE/2) + i/2 + kbx * (QI3_K/2) + (ky+l)%8] >> ((ky+l) / 8);
- const int vlh = (vh << 2) & 0x04040404;
- v[l] = __vsubss4(vll, vlh);
- }
- const int index_y = j * WARP_SIZE + (k*QR3_K) % WARP_SIZE;
- return vec_dot_q3_K_q8_1_impl_mmq(v, &y_qs[index_y], scales, x_dmf[i * (WARP_SIZE/QI3_K) + i/QI3_K + kbx], y_df[index_y/QI8_1]);
- }
- static __device__ __forceinline__ float vec_dot_q4_K_q8_1(
- const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
- const block_q4_K * bq4_K = (const block_q4_K *) vbq;
- int v[2];
- int u[2*QR4_K];
- float d8[QR4_K];
- // iqs is in 0,2..30. bq8_offset = iqs/4 -> bq8_offset = 0, 2, 4, 6
- const int bq8_offset = QR4_K * ((iqs/2) / (QI8_1/2));
- // iqs = 0....3 -> bq8_offset = 0, want q4_offset = 0, 4, 8, 12
- // iqs = 4....7 -> bq8_offset = 2, want q4_offset = 32, 36, 40, 44
- // iqs = 8...11 -> bq8_offset = 4, want q4_offset = 64, 68, 72, 76
- // iqs = 12..15 -> bq8_offset = 6, want q4_offset = 96, 100, 104, 108
- const int * q4 = (const int *)(bq4_K->qs + 16 * bq8_offset + 4 * ((iqs/2)%4));
- v[0] = q4[0];
- v[1] = q4[4];
- const uint16_t * scales = (const uint16_t *)bq4_K->scales;
- uint16_t aux[2];
- const int j = bq8_offset/2;
- if (j < 2) {
- aux[0] = scales[j+0] & 0x3f3f;
- aux[1] = scales[j+2] & 0x3f3f;
- } else {
- aux[0] = ((scales[j+2] >> 0) & 0x0f0f) | ((scales[j-2] & 0xc0c0) >> 2);
- aux[1] = ((scales[j+2] >> 4) & 0x0f0f) | ((scales[j-0] & 0xc0c0) >> 2);
- }
- const uint8_t * sc = (const uint8_t *)aux;
- const uint8_t * m = sc + 2;
- for (int i = 0; i < QR4_K; ++i) {
- const block_q8_1 * bq8i = bq8_1 + bq8_offset + i;
- d8[i] = __low2float(bq8i->ds);
- const int * q8 = (const int *)bq8i->qs + ((iqs/2)%4);
- u[2*i+0] = q8[0];
- u[2*i+1] = q8[4];
- }
- return vec_dot_q4_K_q8_1_impl_vmmq(v, u, sc, m, bq4_K->dm, d8);
- }
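- // Background on the aux[] unpacking above: q4_K stores 8 six-bit scales and
- // 8 six-bit mins in 12 bytes. Scales/mins 0-3 occupy the low 6 bits of bytes
- // 0-3 / 4-7; scales/mins 4-7 keep their low 4 bits in the low/high nibbles of
- // bytes 8-11 and their top 2 bits in the spare top bits of bytes 0-3 / 4-7.
- // The j < 2 branch reads the direct six-bit fields; the else branch stitches
- // nibble and high bits back together, two entries per uint16_t.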
- template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q4_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
- __shared__ int tile_x_ql[mmq_y * (WARP_SIZE) + mmq_y];
- __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI4_K) + mmq_y/QI4_K];
- __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/8) + mmq_y/8];
- *x_ql = tile_x_ql;
- *x_dm = tile_x_dm;
- *x_sc = tile_x_sc;
- }
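- // The "+ mmq_y" (and "+ mmq_y/QI4_K", "+ mmq_y/8") terms pad each shared
- // memory tile so that logically adjacent rows start in different banks; the
- // matching load/vec_dot indexing strides rows by WARP_SIZE + 1 (or adds
- // i/QI4_K, i/8) for the same reason, avoiding bank conflicts on column
- // accesses. The same padding scheme is used by every allocate_tiles_* here.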
- template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q4_K(
- const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
- int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
- const int kbx = k / QI4_K; // == 0 if QK_K == 256
- const int kqsx = k % QI4_K; // == k if QK_K == 256
- const block_q4_K * bx0 = (const block_q4_K *) vx;
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
- int i = i0 + i_offset;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q4_K * bxi = bx0 + i*blocks_per_row + kbx;
- x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx);
- }
- const int blocks_per_tile_x_row = WARP_SIZE / QI4_K; // == 1 if QK_K == 256
- const int kbxd = k % blocks_per_tile_x_row; // == 0 if QK_K == 256
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI4_K) {
- int i = (i0 + i_offset * QI4_K + k / blocks_per_tile_x_row) % mmq_y;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q4_K * bxi = bx0 + i*blocks_per_row + kbxd;
- x_dm[i * (WARP_SIZE/QI4_K) + i / QI4_K + kbxd] = bxi->dm;
- }
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 8) {
- int i = (i0 + i_offset * 8 + k / (WARP_SIZE/8)) % mmq_y;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q4_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/8)) / (QI4_K/8);
- const int * scales = (const int *) bxi->scales;
- const int ksc = k % (WARP_SIZE/8);
- // scale arrangement after the following two lines: sc0,...,sc3, sc4,...,sc7, m0,...,m3, m4,...,m7
- int scales8 = (scales[(ksc%2) + (ksc!=0)] >> (4 * (ksc & (ksc/2)))) & 0x0F0F0F0F; // lower 4 bits
- scales8 |= (scales[ksc/2] >> (2 * (ksc % 2))) & 0x30303030; // upper 2 bits
- x_sc[i * (WARP_SIZE/8) + i / 8 + ksc] = scales8;
- }
- }
- static __device__ __forceinline__ float vec_dot_q4_K_q8_1_mul_mat(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
- const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
- (void)x_qh;
- const uint8_t * sc = ((const uint8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k/16]) + 2*((k % 16) / 8);
- const int index_y = j * WARP_SIZE + (QR4_K*k) % WARP_SIZE;
- return vec_dot_q4_K_q8_1_impl_mmq(&x_ql[i * (WARP_SIZE + 1) + k], &y_qs[index_y], sc, sc+8,
- x_dm[i * (WARP_SIZE/QI4_K) + i/QI4_K], &y_ds[index_y/QI8_1]);
- }
- static __device__ __forceinline__ float vec_dot_q5_K_q8_1(
- const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
- const block_q5_K * bq5_K = (const block_q5_K *) vbq;
- int vl[2];
- int vh[2];
- int u[2*QR5_K];
- float d8[QR5_K];
- const int bq8_offset = QR5_K * ((iqs/2) / (QI8_1/2));
- const int * ql = (const int *)(bq5_K->qs + 16 * bq8_offset + 4 * ((iqs/2)%4));
- const int * qh = (const int *)(bq5_K->qh + 4 * ((iqs/2)%4));
- vl[0] = ql[0];
- vl[1] = ql[4];
- vh[0] = qh[0] >> bq8_offset;
- vh[1] = qh[4] >> bq8_offset;
- const uint16_t * scales = (const uint16_t *)bq5_K->scales;
- uint16_t aux[2];
- const int j = bq8_offset/2;
- if (j < 2) {
- aux[0] = scales[j+0] & 0x3f3f;
- aux[1] = scales[j+2] & 0x3f3f;
- } else {
- aux[0] = ((scales[j+2] >> 0) & 0x0f0f) | ((scales[j-2] & 0xc0c0) >> 2);
- aux[1] = ((scales[j+2] >> 4) & 0x0f0f) | ((scales[j-0] & 0xc0c0) >> 2);
- }
- const uint8_t * sc = (const uint8_t *)aux;
- const uint8_t * m = sc + 2;
- #pragma unroll
- for (int i = 0; i < QR5_K; ++i) {
- const block_q8_1 * bq8i = bq8_1 + bq8_offset + i;
- d8[i] = __low2float(bq8i->ds);
- const int * q8 = (const int *)bq8i->qs + ((iqs/2)%4);
- u[2*i+0] = q8[0];
- u[2*i+1] = q8[4];
- }
- return vec_dot_q5_K_q8_1_impl_vmmq(vl, vh, u, sc, m, bq5_K->dm, d8);
- }
- template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q5_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
- __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE) + mmq_y];
- __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI5_K) + mmq_y/QI5_K];
- __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/8) + mmq_y/8];
- *x_ql = tile_x_ql;
- *x_dm = tile_x_dm;
- *x_sc = tile_x_sc;
- }
- template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q5_K(
- const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
- int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
- const int kbx = k / QI5_K; // == 0 if QK_K == 256
- const int kqsx = k % QI5_K; // == k if QK_K == 256
- const block_q5_K * bx0 = (const block_q5_K *) vx;
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
- int i = i0 + i_offset;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q5_K * bxi = bx0 + i*blocks_per_row + kbx;
- const int ky = QR5_K*kqsx;
- const int ql = get_int_from_uint8_aligned(bxi->qs, kqsx);
- const int ql0 = (ql >> 0) & 0x0F0F0F0F;
- const int ql1 = (ql >> 4) & 0x0F0F0F0F;
- const int qh = get_int_from_uint8_aligned(bxi->qh, kqsx % (QI5_K/4));
- const int qh0 = ((qh >> (2 * (kqsx / (QI5_K/4)) + 0)) << 4) & 0x10101010;
- const int qh1 = ((qh >> (2 * (kqsx / (QI5_K/4)) + 1)) << 4) & 0x10101010;
- const int kq0 = ky - ky % (QI5_K/2) + k % (QI5_K/4) + 0;
- const int kq1 = ky - ky % (QI5_K/2) + k % (QI5_K/4) + (QI5_K/4);
- x_ql[i * (2*WARP_SIZE + 1) + kq0] = ql0 | qh0;
- x_ql[i * (2*WARP_SIZE + 1) + kq1] = ql1 | qh1;
- }
- const int blocks_per_tile_x_row = WARP_SIZE / QI5_K; // == 1 if QK_K == 256
- const int kbxd = k % blocks_per_tile_x_row; // == 0 if QK_K == 256
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI5_K) {
- int i = (i0 + i_offset * QI5_K + k / blocks_per_tile_x_row) % mmq_y;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q5_K * bxi = bx0 + i*blocks_per_row + kbxd;
- x_dm[i * (WARP_SIZE/QI5_K) + i / QI5_K + kbxd] = bxi->dm;
- }
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 8) {
- int i = (i0 + i_offset * 8 + k / (WARP_SIZE/8)) % mmq_y;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q5_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/8)) / (QI5_K/8);
- const int * scales = (const int *) bxi->scales;
- const int ksc = k % (WARP_SIZE/8);
- // scale arrangement after the following two lines: sc0,...,sc3, sc4,...,sc7, m0,...,m3, m4,...,m7
- int scales8 = (scales[(ksc%2) + (ksc!=0)] >> (4 * (ksc & (ksc/2)))) & 0x0F0F0F0F; // lower 4 bits
- scales8 |= (scales[ksc/2] >> (2 * (ksc % 2))) & 0x30303030; // upper 2 bits
- x_sc[i * (WARP_SIZE/8) + i / 8 + ksc] = scales8;
- }
- }
- static __device__ __forceinline__ float vec_dot_q5_K_q8_1_mul_mat(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
- const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
- const uint8_t * sc = ((const uint8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k/16]) + 2 * ((k % 16) / 8);
- const int index_x = i * (QR5_K*WARP_SIZE + 1) + QR5_K*k;
- const int index_y = j * WARP_SIZE + (QR5_K*k) % WARP_SIZE;
- return vec_dot_q5_K_q8_1_impl_mmq(&x_ql[index_x], &y_qs[index_y], sc, sc+8,
- x_dm[i * (WARP_SIZE/QI5_K) + i/QI5_K], &y_ds[index_y/QI8_1]);
- }
- static __device__ __forceinline__ float vec_dot_q6_K_q8_1(
- const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
- const block_q6_K * bq6_K = (const block_q6_K *) vbq;
- const int bq8_offset = 2 * QR6_K * (iqs / (QI6_K/2)) + (iqs % (QI6_K/2)) / (QI6_K/4);
- const int scale_offset = (QI6_K/4) * (iqs / (QI6_K/2)) + (iqs % (QI6_K/2)) / (QI6_K/8);
- const int vh_shift = 2 * ((iqs % (QI6_K/2)) / (QI6_K/4));
- const int vl = get_int_from_uint8(bq6_K->ql, iqs);
- const int vh = get_int_from_uint8(bq6_K->qh, (QI6_K/4) * (iqs / (QI6_K/2)) + iqs % (QI6_K/4)) >> vh_shift;
- const int8_t * scales = bq6_K->scales + scale_offset;
- int u[QR6_K];
- float d8[QR6_K];
- #pragma unroll
- for (int i = 0; i < QR6_K; ++i) {
- u[i] = get_int_from_int8_aligned(bq8_1[bq8_offset + 2*i].qs, iqs % QI8_1);
- d8[i] = __low2float(bq8_1[bq8_offset + 2*i].ds);
- }
- return vec_dot_q6_K_q8_1_impl_mmvq(vl, vh, u, scales, __half2float(bq6_K->d), d8);
- }
- template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q6_K(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) {
- __shared__ int tile_x_ql[mmq_y * (2*WARP_SIZE) + mmq_y];
- __shared__ half2 tile_x_dm[mmq_y * (WARP_SIZE/QI6_K) + mmq_y/QI6_K];
- __shared__ int tile_x_sc[mmq_y * (WARP_SIZE/8) + mmq_y/8];
- *x_ql = tile_x_ql;
- *x_dm = tile_x_dm;
- *x_sc = tile_x_sc;
- }
- template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q6_K(
- const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
- int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
- const int kbx = k / QI6_K; // == 0 if QK_K == 256
- const int kqsx = k % QI6_K; // == k if QK_K == 256
- const block_q6_K * bx0 = (const block_q6_K *) vx;
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
- int i = i0 + i_offset;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q6_K * bxi = bx0 + i*blocks_per_row + kbx;
- const int ky = QR6_K*kqsx;
- const int ql = get_int_from_uint8(bxi->ql, kqsx);
- const int ql0 = (ql >> 0) & 0x0F0F0F0F;
- const int ql1 = (ql >> 4) & 0x0F0F0F0F;
- const int qh = get_int_from_uint8(bxi->qh, (QI6_K/4) * (kqsx / (QI6_K/2)) + kqsx % (QI6_K/4));
- const int qh0 = ((qh >> (2 * ((kqsx % (QI6_K/2)) / (QI6_K/4)))) << 4) & 0x30303030;
- const int qh1 = (qh >> (2 * ((kqsx % (QI6_K/2)) / (QI6_K/4)))) & 0x30303030;
- const int kq0 = ky - ky % QI6_K + k % (QI6_K/2) + 0;
- const int kq1 = ky - ky % QI6_K + k % (QI6_K/2) + (QI6_K/2);
- x_ql[i * (2*WARP_SIZE + 1) + kq0] = __vsubss4(ql0 | qh0, 0x20202020);
- x_ql[i * (2*WARP_SIZE + 1) + kq1] = __vsubss4(ql1 | qh1, 0x20202020);
- }
- const int blocks_per_tile_x_row = WARP_SIZE / QI6_K; // == 1 if QK_K == 256
- const int kbxd = k % blocks_per_tile_x_row; // == 0 if QK_K == 256
- float * x_dmf = (float *) x_dm;
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI6_K) {
- int i = (i0 + i_offset * QI6_K + k / blocks_per_tile_x_row) % mmq_y;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q6_K * bxi = bx0 + i*blocks_per_row + kbxd;
- x_dmf[i * (WARP_SIZE/QI6_K) + i / QI6_K + kbxd] = __half2float(bxi->d);
- }
- #pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 8) {
- int i = (i0 + i_offset * 8 + k / (WARP_SIZE/8)) % mmq_y;
- if (need_check) {
- i = min(i, i_max);
- }
- const block_q6_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/8)) / 4;
- x_sc[i * (WARP_SIZE/8) + i / 8 + k % (WARP_SIZE/8)] = get_int_from_int8(bxi->scales, k % (QI6_K/8));
- }
- }
- static __device__ __forceinline__ float vec_dot_q6_K_q8_1_mul_mat(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
- const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) {
- const float * x_dmf = (const float *) x_dm;
- const float * y_df = (const float *) y_ds;
- const int8_t * sc = ((const int8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k/8]);
- const int index_x = i * (QR6_K*WARP_SIZE + 1) + QR6_K*k;
- const int index_y = j * WARP_SIZE + (QR6_K*k) % WARP_SIZE;
- return vec_dot_q6_K_q8_1_impl_mmq(&x_ql[index_x], &y_qs[index_y], sc, x_dmf[i * (WARP_SIZE/QI6_K) + i/QI6_K], &y_df[index_y/QI8_1]);
- }
- static __device__ __forceinline__ float vec_dot_iq2_xxs_q8_1(
- const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
- const block_iq2_xxs * bq2 = (const block_iq2_xxs *) vbq;
- const int ib32 = iqs;
- const uint16_t * q2 = bq2->qs + 4*ib32;
- const uint8_t * aux8 = (const uint8_t *)q2;
- const int8_t * q8 = bq8_1[ib32].qs;
- uint32_t aux32 = q2[2] | (q2[3] << 16);
- int sumi = 0;
- for (int l = 0; l < 4; ++l) {
- const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]);
- const uint8_t signs = ksigns_iq2xs[aux32 & 127];
- for (int j = 0; j < 8; ++j) {
- sumi += q8[j] * grid[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
- }
- q8 += 8;
- aux32 >>= 7;
- }
- const float d = __half2float(bq2->d) * (0.5f + aux32) * __low2float(bq8_1[ib32].ds) * 0.25f;
- return d * sumi;
- }
- static __device__ __forceinline__ float vec_dot_iq2_xs_q8_1(
- const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
- const block_iq2_xs * bq2 = (const block_iq2_xs *) vbq;
- const int ib32 = iqs;
- const uint16_t * q2 = bq2->qs + 4*ib32;
- const int8_t * q8 = bq8_1[ib32].qs;
- const uint8_t ls1 = bq2->scales[ib32] & 0xf;
- const uint8_t ls2 = bq2->scales[ib32] >> 4;
- int sumi1 = 0;
- for (int l = 0; l < 2; ++l) {
- const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511));
- const uint8_t signs = ksigns_iq2xs[q2[l] >> 9];
- for (int j = 0; j < 8; ++j) {
- sumi1 += q8[j] * grid[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
- }
- q8 += 8;
- }
- int sumi2 = 0;
- for (int l = 2; l < 4; ++l) {
- const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511));
- const uint8_t signs = ksigns_iq2xs[q2[l] >> 9];
- for (int j = 0; j < 8; ++j) {
- sumi2 += q8[j] * grid[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
- }
- q8 += 8;
- }
- const float d = __half2float(bq2->d) * __low2float(bq8_1[ib32].ds) * 0.25f;
- return d * ((0.5f + ls1) * sumi1 + (0.5f + ls2) * sumi2);
- }
- static __device__ __forceinline__ float vec_dot_iq2_s_q8_1(
- const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
- const block_iq2_s * bq2 = (const block_iq2_s *) vbq;
- const int ib32 = iqs;
- const int8_t * q8 = bq8_1[ib32].qs;
- const uint8_t * signs = bq2->qs + QK_K/8 + 4*ib32;
- const uint8_t ls1 = bq2->scales[ib32] & 0xf;
- const uint8_t ls2 = bq2->scales[ib32] >> 4;
- int sumi1 = 0;
- for (int l = 0; l < 2; ++l) {
- const uint32_t * grid = (const uint32_t *)(iq2s_grid + (bq2->qs[4*ib32+l] | ((bq2->qh[ib32] << (8-2*l)) & 0x300)));
- const uint32_t signs0 = __vcmpeq4(((signs[l] & 0xf) * 0x01010101) & 0x08040201, 0x08040201);
- const uint32_t signs1 = __vcmpeq4(((signs[l] >> 4) * 0x01010101) & 0x08040201, 0x08040201);
- const int grid_l = __vsub4(grid[0] ^ signs0, signs0);
- const int grid_h = __vsub4(grid[1] ^ signs1, signs1);
- sumi1 = __dp4a(grid_l, *((const int *)q8 + 0), sumi1);
- sumi1 = __dp4a(grid_h, *((const int *)q8 + 1), sumi1);
- q8 += 8;
- }
- int sumi2 = 0;
- for (int l = 2; l < 4; ++l) {
- const uint32_t * grid = (const uint32_t *)(iq2s_grid + (bq2->qs[4*ib32+l] | ((bq2->qh[ib32] << (8-2*l)) & 0x300)));
- const uint32_t signs0 = __vcmpeq4(((signs[l] & 0xf) * 0x01010101) & 0x08040201, 0x08040201);
- const uint32_t signs1 = __vcmpeq4(((signs[l] >> 4) * 0x01010101) & 0x08040201, 0x08040201);
- const int grid_l = __vsub4(grid[0] ^ signs0, signs0);
- const int grid_h = __vsub4(grid[1] ^ signs1, signs1);
- sumi2 = __dp4a(grid_l, *((const int *)q8 + 0), sumi2);
- sumi2 = __dp4a(grid_h, *((const int *)q8 + 1), sumi2);
- q8 += 8;
- }
- const float d = __half2float(bq2->d) * __low2float(bq8_1[ib32].ds) * 0.25f;
- return d * ((0.5f + ls1) * sumi1 + (0.5f + ls2) * sumi2);
- }
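- // In the two loops above, __vcmpeq4 expands each packed sign bit into a byte
- // mask of 0x00 or 0xFF, and __vsub4(x ^ s, s) then negates exactly the byte
- // lanes where s == 0xFF (two's complement: (x ^ 0xFF) - 0xFF == ~x + 1 == -x).
- // A minimal self-contained sketch of that identity (cond_negate_sketch is
- // illustrative only, not part of the original file); the same trick reappears
- // in the iq3_xxs and iq3_s kernels below:
- static __device__ __forceinline__ int cond_negate_sketch(const int x, const unsigned int s) {
- // s must hold 0x00 or 0xFF in every byte lane
- return __vsub4(x ^ s, s);
- }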
- static __device__ __forceinline__ float vec_dot_iq3_xxs_q8_1(
- const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
- const block_iq3_xxs * bq2 = (const block_iq3_xxs *) vbq;
- const int ib32 = iqs;
- const uint8_t * q3 = bq2->qs + 8*ib32;
- const uint16_t * gas = (const uint16_t *)(bq2->qs + QK_K/4) + 2*ib32;
- const int8_t * q8 = bq8_1[ib32].qs;
- uint32_t aux32 = gas[0] | (gas[1] << 16);
- int sumi = 0;
- for (int l = 0; l < 4; ++l) {
- const uint32_t * grid1 = iq3xxs_grid + q3[2*l+0];
- const uint32_t * grid2 = iq3xxs_grid + q3[2*l+1];
- const uint32_t * signs = (const uint32_t *)(ksigns64 + (aux32 & 127));
- const int grid_l = __vsub4(grid1[0] ^ signs[0], signs[0]);
- const int grid_h = __vsub4(grid2[0] ^ signs[1], signs[1]);
- sumi = __dp4a(grid_l, *((int *)q8+0), sumi);
- sumi = __dp4a(grid_h, *((int *)q8+1), sumi);
- q8 += 8;
- aux32 >>= 7;
- }
- const float d = __half2float(bq2->d) * (0.5f + aux32) * __low2float(bq8_1[ib32].ds) * 0.5f;
- return d * sumi;
- }
- static __device__ __forceinline__ float vec_dot_iq3_s_q8_1(
- const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
- const block_iq3_s * bq2 = (const block_iq3_s *) vbq;
- const int ib32 = iqs;
- const uint8_t * qs = bq2->qs + 8*ib32;
- const int8_t * q8 = bq8_1[ib32].qs;
- int sumi = 0;
- for (int l = 0; l < 4; ++l) {
- const uint32_t * grid1 = iq3xs_grid + (qs[2*l+0] | ((bq2->qh[ib32] << (8 - 2*l)) & 256));
- const uint32_t * grid2 = iq3xs_grid + (qs[2*l+1] | ((bq2->qh[ib32] << (7 - 2*l)) & 256));
- uint32_t signs0 = __vcmpeq4(((bq2->signs[4*ib32+l] & 0xf) * 0x01010101) & 0x08040201, 0x08040201);
- uint32_t signs1 = __vcmpeq4(((bq2->signs[4*ib32+l] >> 4) * 0x01010101) & 0x08040201, 0x08040201);
- const int grid_l = __vsub4(grid1[0] ^ signs0, signs0);
- const int grid_h = __vsub4(grid2[0] ^ signs1, signs1);
- sumi = __dp4a(grid_l, *((int *)q8+0), sumi);
- sumi = __dp4a(grid_h, *((int *)q8+1), sumi);
- q8 += 8;
- }
- const float d = __half2float(bq2->d) * (0.5f + ((bq2->scales[ib32/2] >> 4*(ib32%2)) & 0xf)) * __low2float(bq8_1[ib32].ds) * 0.5f;
- return d * sumi;
- }
- static __device__ __forceinline__ float vec_dot_iq1_s_q8_1(
- const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
- const block_iq1_s * bq1 = (const block_iq1_s *) vbq;
- const int ib32 = iqs;
- int sumi1 = 0, sumi2 = 0, sumi3 = 0, sumi4 = 0;
- const uint8_t h1 = bq1->scales[2*ib32+0];
- const uint8_t h2 = bq1->scales[2*ib32+1];
- const int * q8 = (const int *)bq8_1[ib32].qs;
- const int * grid1 = (const int *)(iq1s_grid + (bq1->qs[4*ib32+0] | ((h1 & 0x08) << 5)));
- const int * grid2 = (const int *)(iq1s_grid + (bq1->qs[4*ib32+1] | ((h1 & 0x80) << 1)));
- const int * grid3 = (const int *)(iq1s_grid + (bq1->qs[4*ib32+2] | ((h2 & 0x08) << 5)));
- const int * grid4 = (const int *)(iq1s_grid + (bq1->qs[4*ib32+3] | ((h2 & 0x80) << 1)));
- for (int j = 0; j < 2; ++j) {
- sumi1 = __dp4a(q8[j+0], grid1[j], sumi1);
- sumi2 = __dp4a(q8[j+2], grid2[j], sumi2);
- sumi3 = __dp4a(q8[j+4], grid3[j], sumi3);
- sumi4 = __dp4a(q8[j+6], grid4[j], sumi4);
- }
- const float d = __half2float(bq1->d) * __low2float(bq8_1[ib32].ds);
- return d * (sumi1 * (2*(h1 & 7) + 1) + sumi2 * (2*((h1 >> 4) & 7) + 1) +
- sumi3 * (2*(h2 & 7) + 1) + sumi4 * (2*((h2 >> 4) & 7) + 1));
- }
- static __device__ __forceinline__ void get_int_from_table_16(const uint32_t & q4, const uint8_t * values,
- int & val1, int & val2) {
- uint32_t aux32; const uint8_t * q8 = (const uint8_t *)&aux32;
- aux32 = q4 & 0x0f0f0f0f;
- uint16_t v1 = values[q8[0]] | (values[q8[1]] << 8);
- uint16_t v2 = values[q8[2]] | (values[q8[3]] << 8);
- val1 = v1 | (v2 << 16);
- aux32 = (q4 >> 4) & 0x0f0f0f0f;
- v1 = values[q8[0]] | (values[q8[1]] << 8);
- v2 = values[q8[2]] | (values[q8[3]] << 8);
- val2 = v1 | (v2 << 16);
- }
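- // get_int_from_table_16 maps the four low nibbles (then the four high
- // nibbles) of a packed uint32 through the 16-entry codebook `values`,
- // re-packing each group of four looked-up int8 results into one int32 so the
- // callers below can feed them straight into __dp4a.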
- static __device__ __forceinline__ float vec_dot_iq4_nl_q8_1(
- const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
- const block_iq4_nl * bq = (const block_iq4_nl *) vbq;
- const uint16_t * q4 = (const uint16_t *)bq->qs + 2*iqs;
- const int32_t * q8 = (const int32_t *)bq8_1->qs + iqs;
- const uint8_t * values = (const uint8_t *)kvalues_iq4nl;
- int v1, v2;
- int sumi1 = 0, sumi2 = 0;
- for (int l = 0; l < VDR_Q4_0_Q8_1_MMVQ; ++l) {
- const uint32_t aux = q4[2*l] | (q4[2*l+1] << 16);
- get_int_from_table_16(aux, values, v1, v2);
- sumi1 = __dp4a(v1, q8[l+0], sumi1);
- sumi2 = __dp4a(v2, q8[l+4], sumi2);
- }
- const float d = __half2float(bq->d) * __low2float(bq8_1->ds);
- return d * (sumi1 + sumi2);
- }
- static __device__ __forceinline__ float vec_dot_iq4_xs_q8_1(
- const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs) {
- const block_iq4_xs * bq4 = (const block_iq4_xs *) vbq;
- const uint8_t * values = (const uint8_t *)kvalues_iq4nl;
- // iqs is 0...7
- const int ib32 = iqs;
- const int32_t * q8 = (const int *)bq8_1[ib32].qs;
- const uint32_t * q4 = (const uint32_t *)bq4->qs + 4*ib32;
- const int8_t ls = ((bq4->scales_l[ib32/2] >> 4*(ib32%2)) & 0xf) | (((bq4->scales_h >> 2*ib32) & 3) << 4);
- const float d = __half2float(bq4->d) * (ls - 32) * __low2float(bq8_1[ib32].ds);
- int v1, v2;
- int sumi1 = 0, sumi2 = 0;
- for (int j = 0; j < 4; ++j) {
- get_int_from_table_16(q4[j], values, v1, v2);
- sumi1 = __dp4a(v1, q8[j+0], sumi1);
- sumi2 = __dp4a(v2, q8[j+4], sumi2);
- }
- return d * (sumi1 + sumi2);
- }
- template <int qk, int qi, typename block_q_t, int vdr, vec_dot_q_cuda_t vec_dot_q_cuda>
- static __global__ void mul_mat_vec_q(const void * __restrict__ vx, const void * __restrict__ vy, half * __restrict__ dst, const int ncols, const int nrows) {
- const int row = blockIdx.x*blockDim.y + threadIdx.y;
- if (row >= nrows) {
- return;
- }
- const int blocks_per_row = ncols / qk;
- const int blocks_per_warp = vdr * WARP_SIZE / qi;
- // partial sum for each thread
- float tmp = 0.0f;
- const block_q_t * x = (const block_q_t *) vx;
- const block_q8_1 * y = (const block_q8_1 *) vy;
- for (int i = threadIdx.x / (qi/vdr); i < blocks_per_row; i += blocks_per_warp) {
- const int ibx = row*blocks_per_row + i; // x block index
- const int iby = i * (qk/QK8_1); // y block index that aligns with ibx
- const int iqs = vdr * (threadIdx.x % (qi/vdr)); // x block quant index when casting the quants to int
- tmp += vec_dot_q_cuda(&x[ibx], &y[iby], iqs);
- }
- // sum up partial sums and write back result
- #pragma unroll
- for (int mask = 16; mask > 0; mask >>= 1) {
- tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
- }
- if (threadIdx.x == 0) {
- dst[row] = __float2half(tmp);
- }
- }
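- // The shuffle loop above is a standard warp-wide butterfly reduction over
- // 32 lanes. As a minimal stand-alone sketch (warp_reduce_sum_sketch is
- // illustrative, not part of the original file):
- static __device__ __forceinline__ float warp_reduce_sum_sketch(float v) {
- #pragma unroll
- for (int mask = 16; mask > 0; mask >>= 1) {
- v += __shfl_xor_sync(0xffffffff, v, mask, 32); // exchange partial sums with lane ^ mask
- }
- return v; // after log2(32) = 5 steps every lane holds the full warp sum
- }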
- static void mul_mat_vec_q4_0_q8_1_cuda(const void * vx, const void * vy, half * dst, const int ncols, const int nrows, cudaStream_t stream) {
- const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
- const dim3 block_nums(block_num_y, 1, 1);
- const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
- mul_mat_vec_q<QK4_0, QI4_0, block_q4_0, VDR_Q4_0_Q8_1_MMVQ, vec_dot_q4_0_q8_1>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows);
- }
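- // Launch geometry shared by all mul_mat_vec_*_cuda wrappers in this file:
- // each block carries GGML_CUDA_MMV_Y warps, each warp reduces one output row,
- // so the grid needs (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y blocks.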
- static void mul_mat_vec_q4_1_q8_1_cuda(const void * vx, const void * vy, half * dst, const int ncols, const int nrows, cudaStream_t stream) {
- const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
- const dim3 block_nums(block_num_y, 1, 1);
- const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
- mul_mat_vec_q<QK4_1, QI4_1, block_q4_1, VDR_Q4_1_Q8_1_MMVQ, vec_dot_q4_1_q8_1>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows);
- }
- static void mul_mat_vec_q5_0_q8_1_cuda(const void * vx, const void * vy, half * dst, const int ncols, const int nrows, cudaStream_t stream) {
- const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
- const dim3 block_nums(block_num_y, 1, 1);
- const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
- mul_mat_vec_q<QK5_0, QI5_0, block_q5_0, VDR_Q5_0_Q8_1_MMVQ, vec_dot_q5_0_q8_1>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows);
- }
- static void mul_mat_vec_q5_1_q8_1_cuda(const void * vx, const void * vy, half * dst, const int ncols, const int nrows, cudaStream_t stream) {
- const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
- const dim3 block_nums(block_num_y, 1, 1);
- const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
- mul_mat_vec_q<QK5_1, QI5_1, block_q5_1, VDR_Q5_1_Q8_1_MMVQ, vec_dot_q5_1_q8_1>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows);
- }
- static void mul_mat_vec_q8_0_q8_1_cuda(const void * vx, const void * vy, half * dst, const int ncols, const int nrows, cudaStream_t stream) {
- const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
- const dim3 block_nums(block_num_y, 1, 1);
- const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
- mul_mat_vec_q<QK8_0, QI8_0, block_q8_0, VDR_Q8_0_Q8_1_MMVQ, vec_dot_q8_0_q8_1>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows);
- }
- static void mul_mat_vec_q2_K_q8_1_cuda(const void * vx, const void * vy, half * dst, const int ncols, const int nrows, cudaStream_t stream) {
- const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
- const dim3 block_nums(block_num_y, 1, 1);
- const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
- mul_mat_vec_q<QK_K, QI2_K, block_q2_K, VDR_Q2_K_Q8_1_MMVQ, vec_dot_q2_K_q8_1>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows);
- }
- static void mul_mat_vec_q3_K_q8_1_cuda(const void * vx, const void * vy, half * dst, const int ncols, const int nrows, cudaStream_t stream) {
- const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
- const dim3 block_nums(block_num_y, 1, 1);
- const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
- mul_mat_vec_q<QK_K, QI3_K, block_q3_K, VDR_Q3_K_Q8_1_MMVQ, vec_dot_q3_K_q8_1>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows);
- }
- static void mul_mat_vec_q4_K_q8_1_cuda(const void * vx, const void * vy, half * dst, const int ncols, const int nrows, cudaStream_t stream) {
- const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
- const dim3 block_nums(block_num_y, 1, 1);
- const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
- mul_mat_vec_q<QK_K, QI4_K, block_q4_K, VDR_Q4_K_Q8_1_MMVQ, vec_dot_q4_K_q8_1>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows);
- }
- static void mul_mat_vec_q5_K_q8_1_cuda(const void * vx, const void * vy, half * dst, const int ncols, const int nrows, cudaStream_t stream) {
- const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
- const dim3 block_nums(block_num_y, 1, 1);
- const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
- mul_mat_vec_q<QK_K, QI5_K, block_q5_K, VDR_Q5_K_Q8_1_MMVQ, vec_dot_q5_K_q8_1>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows);
- }
- static void mul_mat_vec_q6_K_q8_1_cuda(const void * vx, const void * vy, half * dst, const int ncols, const int nrows, cudaStream_t stream) {
- const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
- const dim3 block_nums(block_num_y, 1, 1);
- const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
- mul_mat_vec_q<QK_K, QI6_K, block_q6_K, VDR_Q6_K_Q8_1_MMVQ, vec_dot_q6_K_q8_1>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows);
- }
- static void mul_mat_vec_iq2_xxs_q8_1_cuda(const void * vx, const void * vy, half * dst, const int ncols, const int nrows, cudaStream_t stream) {
- const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
- const dim3 block_nums(block_num_y, 1, 1);
- const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
- mul_mat_vec_q<QK_K, QI2_XXS, block_iq2_xxs, 1, vec_dot_iq2_xxs_q8_1>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows);
- }
- static void mul_mat_vec_iq2_xs_q8_1_cuda(const void * vx, const void * vy, half * dst, const int ncols, const int nrows, cudaStream_t stream) {
- const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
- const dim3 block_nums(block_num_y, 1, 1);
- const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
- mul_mat_vec_q<QK_K, QI2_XS, block_iq2_xs, 1, vec_dot_iq2_xs_q8_1>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows);
- }
- static void mul_mat_vec_iq2_s_q8_1_cuda(const void * vx, const void * vy, half * dst, const int ncols, const int nrows, cudaStream_t stream) {
- const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
- const dim3 block_nums(block_num_y, 1, 1);
- const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
- mul_mat_vec_q<QK_K, QI2_S, block_iq2_s, 1, vec_dot_iq2_s_q8_1>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows);
- }
- static void mul_mat_vec_iq3_xxs_q8_1_cuda(const void * vx, const void * vy, half * dst, const int ncols, const int nrows, cudaStream_t stream) {
- const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
- const dim3 block_nums(block_num_y, 1, 1);
- const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
- mul_mat_vec_q<QK_K, QI3_XXS, block_iq3_xxs, 1, vec_dot_iq3_xxs_q8_1>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows);
- }
- static void mul_mat_vec_iq1_s_q8_1_cuda(const void * vx, const void * vy, half * dst, const int ncols, const int nrows, cudaStream_t stream) {
- const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
- const dim3 block_nums(block_num_y, 1, 1);
- const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
- mul_mat_vec_q<QK_K, QI1_S, block_iq1_s, 1, vec_dot_iq1_s_q8_1>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows);
- }
- static void mul_mat_vec_iq4_nl_q8_1_cuda(const void * vx, const void * vy, half * dst, const int ncols, const int nrows, cudaStream_t stream) {
- const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
- const dim3 block_nums(block_num_y, 1, 1);
- const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
- mul_mat_vec_q<QK4_NL, QI4_NL, block_iq4_nl, VDR_Q4_0_Q8_1_MMVQ, vec_dot_iq4_nl_q8_1>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows);
- }
- static void mul_mat_vec_iq4_xs_q8_1_cuda(const void * vx, const void * vy, half * dst, const int ncols, const int nrows, cudaStream_t stream) {
- const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
- const dim3 block_nums(block_num_y, 1, 1);
- const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
- mul_mat_vec_q<QK_K, QI4_XS, block_iq4_xs, 1, vec_dot_iq4_xs_q8_1>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows);
- }
- static void mul_mat_vec_iq3_s_q8_1_cuda(const void * vx, const void * vy, half * dst, const int ncols, const int nrows, cudaStream_t stream) {
- const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
- const dim3 block_nums(block_num_y, 1, 1);
- const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
- mul_mat_vec_q<QK_K, QI3_XS, block_iq3_s, 1, vec_dot_iq3_s_q8_1>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows);
- }
- template <int qk, int qr, int qi, bool need_sum, typename block_q_t, int mmq_x, int mmq_y, int nwarps,
- allocate_tiles_cuda_t allocate_tiles, load_tiles_cuda_t load_tiles, int vdr, vec_dot_q_mul_mat_cuda_t vec_dot>
- static __device__ __forceinline__ void mul_mat_q(
- const void * __restrict__ vx, const void * __restrict__ vy, half * __restrict__ dst,
- const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
- const block_q_t * x = (const block_q_t *) vx;
- const block_q8_1 * y = (const block_q8_1 *) vy;
- const int blocks_per_row_x = ncols_x / qk;
- const int blocks_per_col_y = nrows_y / QK8_1;
- const int blocks_per_warp = WARP_SIZE / qi;
- const int & ncols_dst = ncols_y;
- const int row_dst_0 = blockIdx.x*mmq_y;
- const int & row_x_0 = row_dst_0;
- const int col_dst_0 = blockIdx.y*mmq_x;
- const int & col_y_0 = col_dst_0;
- int * tile_x_ql = nullptr;
- half2 * tile_x_dm = nullptr;
- int * tile_x_qh = nullptr;
- int * tile_x_sc = nullptr;
- allocate_tiles(&tile_x_ql, &tile_x_dm, &tile_x_qh, &tile_x_sc);
- __shared__ int tile_y_qs[mmq_x * WARP_SIZE];
- __shared__ half2 tile_y_ds[mmq_x * WARP_SIZE/QI8_1];
- float sum[mmq_y/WARP_SIZE][mmq_x/nwarps] = {{0.0f}};
- for (int ib0 = 0; ib0 < blocks_per_row_x; ib0 += blocks_per_warp) {
- load_tiles(x + row_x_0*blocks_per_row_x + ib0, tile_x_ql, tile_x_dm, tile_x_qh, tile_x_sc,
- threadIdx.y, nrows_x-row_x_0-1, threadIdx.x, blocks_per_row_x);
- #pragma unroll
- for (int ir = 0; ir < qr; ++ir) {
- const int kqs = ir*WARP_SIZE + threadIdx.x;
- const int kbxd = kqs / QI8_1;
- #pragma unroll
- for (int i = 0; i < mmq_x; i += nwarps) {
- const int col_y_eff = min(col_y_0 + threadIdx.y + i, ncols_y-1); // to prevent out-of-bounds memory accesses
- const block_q8_1 * by0 = &y[col_y_eff*blocks_per_col_y + ib0 * (qk/QK8_1) + kbxd];
- const int index_y = (threadIdx.y + i) * WARP_SIZE + kqs % WARP_SIZE;
- tile_y_qs[index_y] = get_int_from_int8_aligned(by0->qs, threadIdx.x % QI8_1);
- }
- #pragma unroll
- for (int ids0 = 0; ids0 < mmq_x; ids0 += nwarps * QI8_1) {
- const int ids = (ids0 + threadIdx.y * QI8_1 + threadIdx.x / (WARP_SIZE/QI8_1)) % mmq_x;
- const int kby = threadIdx.x % (WARP_SIZE/QI8_1);
- const int col_y_eff = min(col_y_0 + ids, ncols_y-1);
- // if the sum is not needed, it's faster to transform the scale to f32 ahead of time
- const half2 * dsi_src = &y[col_y_eff*blocks_per_col_y + ib0 * (qk/QK8_1) + ir*(WARP_SIZE/QI8_1) + kby].ds;
- half2 * dsi_dst = &tile_y_ds[ids * (WARP_SIZE/QI8_1) + kby];
- if (need_sum) {
- *dsi_dst = *dsi_src;
- } else {
- float * dfi_dst = (float *) dsi_dst;
- *dfi_dst = __low2float(*dsi_src);
- }
- }
- __syncthreads();
- // #pragma unroll // unrolling this loop causes too much register pressure
- for (int k = ir*WARP_SIZE/qr; k < (ir+1)*WARP_SIZE/qr; k += vdr) {
- #pragma unroll
- for (int j = 0; j < mmq_x; j += nwarps) {
- #pragma unroll
- for (int i = 0; i < mmq_y; i += WARP_SIZE) {
- sum[i/WARP_SIZE][j/nwarps] += vec_dot(
- tile_x_ql, tile_x_dm, tile_x_qh, tile_x_sc, tile_y_qs, tile_y_ds,
- threadIdx.x + i, threadIdx.y + j, k);
- }
- }
- }
- __syncthreads();
- }
- }
- #pragma unroll
- for (int j = 0; j < mmq_x; j += nwarps) {
- const int col_dst = col_dst_0 + j + threadIdx.y;
- if (col_dst >= ncols_dst) {
- return;
- }
- #pragma unroll
- for (int i = 0; i < mmq_y; i += WARP_SIZE) {
- const int row_dst = row_dst_0 + threadIdx.x + i;
- if (row_dst >= nrows_dst) {
- continue;
- }
- dst[col_dst*nrows_dst + row_dst] = __float2half(sum[i/WARP_SIZE][j/nwarps]);
- }
- }
- }
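- // Structure of mul_mat_q: each thread block computes one mmq_y x mmq_x tile
- // of dst. The ib0 loop walks the shared K dimension; load_tiles stages a
- // slice of the quantized x matrix in shared memory, the ir loop stages the
- // matching q8_1 slice of y into tile_y_qs/tile_y_ds, and the k/j/i loops
- // accumulate per-thread partial sums through the type-specific vec_dot
- // callback. The epilogue writes the tile back as fp16, skipping rows and
- // columns that fall outside dst.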
- #if defined(USE_ROCM)
- #define MMQ_X_Q4_0 64
- #define MMQ_Y_Q4_0 128
- #define NWARPS_Q4_0 8
- #else
- #define MMQ_X_Q4_0 4
- #define MMQ_Y_Q4_0 32
- #define NWARPS_Q4_0 4
- #endif
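- // MMQ_X/MMQ_Y/NWARPS triples like the one above are per-backend tile-size
- // tuning knobs, repeated below for every quantization type: ROCm builds use
- // 64x128 tiles with 8 warps per block, CUDA builds 4x32 tiles with 4 warps.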
- template <bool need_check> static __global__ void
- #if defined(USE_ROCM)
- __launch_bounds__(WARP_SIZE*NWARPS_Q4_0, 2)
- #endif
- mul_mat_q4_0(
- const void * __restrict__ vx, const void * __restrict__ vy, half * __restrict__ dst,
- const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
- const int mmq_x = MMQ_X_Q4_0;
- const int mmq_y = MMQ_Y_Q4_0;
- const int nwarps = NWARPS_Q4_0;
- mul_mat_q<QK4_0, QR4_0, QI4_0, true, block_q4_0, mmq_x, mmq_y, nwarps, allocate_tiles_q4_0<mmq_y>,
- load_tiles_q4_0<mmq_y, nwarps, need_check>, VDR_Q4_0_Q8_1_MMQ, vec_dot_q4_0_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
- static void ggml_mul_mat_q4_0_q8_1_cuda(
- const void * vx, const void * vy, half * dst, const int ncols_x, const int nrows_x,
- const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
- int mmq_x = MMQ_X_Q4_0;
- int mmq_y = MMQ_Y_Q4_0;
- int nwarps = NWARPS_Q4_0;
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q4_0<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q4_0<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
- }
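- // The need_check split (here and in every ggml_mul_mat_*_q8_1_cuda wrapper
- // below) picks a template instantiation at compile time: when nrows_x is a
- // multiple of mmq_y no tile can overhang the matrix, so the bounds clamp
- // min(i, i_max) in load_tiles compiles away entirely.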
- #if defined(USE_ROCM)
- #define MMQ_X_Q4_1 64
- #define MMQ_Y_Q4_1 128
- #define NWARPS_Q4_1 8
- #else
- #define MMQ_X_Q4_1 4
- #define MMQ_Y_Q4_1 32
- #define NWARPS_Q4_1 4
- #endif
- template <bool need_check> static __global__ void
- #if defined(USE_ROCM)
- __launch_bounds__(WARP_SIZE*NWARPS_Q4_1, 2)
- #endif
- mul_mat_q4_1(
- const void * __restrict__ vx, const void * __restrict__ vy, half * __restrict__ dst,
- const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
- const int mmq_x = MMQ_X_Q4_1;
- const int mmq_y = MMQ_Y_Q4_1;
- const int nwarps = NWARPS_Q4_1;
- mul_mat_q<QK4_1, QR4_1, QI4_1, true, block_q4_1, mmq_x, mmq_y, nwarps, allocate_tiles_q4_1<mmq_y>,
- load_tiles_q4_1<mmq_y, nwarps, need_check>, VDR_Q4_1_Q8_1_MMQ, vec_dot_q4_1_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
- static void ggml_mul_mat_q4_1_q8_1_cuda(
- const void * vx, const void * vy, half * dst, const int ncols_x, const int nrows_x,
- const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
- int mmq_x = MMQ_X_Q4_1;
- int mmq_y = MMQ_Y_Q4_1;
- int nwarps = NWARPS_Q4_1;
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q4_1<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q4_1<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
- }
- #if defined(USE_ROCM)
- #define MMQ_X_Q5_0 64
- #define MMQ_Y_Q5_0 128
- #define NWARPS_Q5_0 8
- #else
- #define MMQ_X_Q5_0 4
- #define MMQ_Y_Q5_0 32
- #define NWARPS_Q5_0 4
- #endif
- template <bool need_check> static __global__ void
- #if defined(USE_ROCM)
- __launch_bounds__(WARP_SIZE*NWARPS_Q5_0, 2)
- #endif
- mul_mat_q5_0(
- const void * __restrict__ vx, const void * __restrict__ vy, half * __restrict__ dst,
- const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
- const int mmq_x = MMQ_X_Q5_0;
- const int mmq_y = MMQ_Y_Q5_0;
- const int nwarps = NWARPS_Q5_0;
- mul_mat_q<QK5_0, QR5_0, QI5_0, false, block_q5_0, mmq_x, mmq_y, nwarps, allocate_tiles_q5_0<mmq_y>,
- load_tiles_q5_0<mmq_y, nwarps, need_check>, VDR_Q5_0_Q8_1_MMQ, vec_dot_q5_0_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
- static void ggml_mul_mat_q5_0_q8_1_cuda(
- const void * vx, const void * vy, half * dst, const int ncols_x, const int nrows_x,
- const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
- const int mmq_x = MMQ_X_Q5_0;
- const int mmq_y = MMQ_Y_Q5_0;
- const int nwarps = NWARPS_Q5_0;
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q5_0<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q5_0<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
- }
- #if defined(USE_ROCM)
- #define MMQ_X_Q5_1 64
- #define MMQ_Y_Q5_1 128
- #define NWARPS_Q5_1 8
- #else
- #define MMQ_X_Q5_1 4
- #define MMQ_Y_Q5_1 32
- #define NWARPS_Q5_1 4
- #endif
- template <bool need_check> static __global__ void
- #if defined(USE_ROCM)
- __launch_bounds__(WARP_SIZE*NWARPS_Q5_1, 2)
- #endif
- mul_mat_q5_1(
- const void * __restrict__ vx, const void * __restrict__ vy, half * __restrict__ dst,
- const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
- const int mmq_x = MMQ_X_Q5_1;
- const int mmq_y = MMQ_Y_Q5_1;
- const int nwarps = NWARPS_Q5_1;
- mul_mat_q<QK5_1, QR5_1, QI5_1, true, block_q5_1, mmq_x, mmq_y, nwarps, allocate_tiles_q5_1<mmq_y>,
- load_tiles_q5_1<mmq_y, nwarps, need_check>, VDR_Q5_1_Q8_1_MMQ, vec_dot_q5_1_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
- static void ggml_mul_mat_q5_1_q8_1_cuda(
- const void * vx, const void * vy, half * dst, const int ncols_x, const int nrows_x,
- const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
- const int mmq_x = MMQ_X_Q5_1;
- const int mmq_y = MMQ_Y_Q5_1;
- const int nwarps = NWARPS_Q5_1;
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q5_1<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q5_1<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
- }
- #if defined(USE_ROCM)
- #define MMQ_X_Q8_0 64
- #define MMQ_Y_Q8_0 128
- #define NWARPS_Q8_0 8
- #else
- #define MMQ_X_Q8_0 4
- #define MMQ_Y_Q8_0 32
- #define NWARPS_Q8_0 4
- #endif
- template <bool need_check> static __global__ void
- #if defined(USE_ROCM)
- __launch_bounds__(WARP_SIZE*NWARPS_Q8_0, 2)
- #endif
- mul_mat_q8_0(
- const void * __restrict__ vx, const void * __restrict__ vy, half * __restrict__ dst,
- const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
- const int mmq_x = MMQ_X_Q8_0;
- const int mmq_y = MMQ_Y_Q8_0;
- const int nwarps = NWARPS_Q8_0;
- mul_mat_q<QK8_0, QR8_0, QI8_0, false, block_q8_0, mmq_x, mmq_y, nwarps, allocate_tiles_q8_0<mmq_y>,
- load_tiles_q8_0<mmq_y, nwarps, need_check>, VDR_Q8_0_Q8_1_MMQ, vec_dot_q8_0_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
- static void ggml_mul_mat_q8_0_q8_1_cuda(
- const void * vx, const void * vy, half * dst, const int ncols_x, const int nrows_x,
- const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
- const int mmq_x = MMQ_X_Q8_0;
- const int mmq_y = MMQ_Y_Q8_0;
- const int nwarps = NWARPS_Q8_0;
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q8_0<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q8_0<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
- }
- #if defined(USE_ROCM)
- #define MMQ_X_Q2_K 64
- #define MMQ_Y_Q2_K 128
- #define NWARPS_Q2_K 8
- #else
- #define MMQ_X_Q2_K 4
- #define MMQ_Y_Q2_K 32
- #define NWARPS_Q2_K 4
- #endif
- template <bool need_check> static __global__ void
- #if defined(USE_ROCM)
- __launch_bounds__(WARP_SIZE*NWARPS_Q2_K, 2)
- #endif
- mul_mat_q2_K(
- const void * __restrict__ vx, const void * __restrict__ vy, half * __restrict__ dst,
- const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
- const int mmq_x = MMQ_X_Q2_K;
- const int mmq_y = MMQ_Y_Q2_K;
- const int nwarps = NWARPS_Q2_K;
- mul_mat_q<QK_K, QR2_K, QI2_K, false, block_q2_K, mmq_x, mmq_y, nwarps, allocate_tiles_q2_K<mmq_y>,
- load_tiles_q2_K<mmq_y, nwarps, need_check>, VDR_Q2_K_Q8_1_MMQ, vec_dot_q2_K_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
- static void ggml_mul_mat_q2_K_q8_1_cuda(
- const void * vx, const void * vy, half * dst, const int ncols_x, const int nrows_x,
- const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
- const int mmq_x = MMQ_X_Q2_K;
- const int mmq_y = MMQ_Y_Q2_K;
- const int nwarps = NWARPS_Q2_K;
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q2_K<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q2_K<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
- }
- #if defined(USE_ROCM)
- #define MMQ_X_Q3_K 64
- #define MMQ_Y_Q3_K 128
- #define NWARPS_Q3_K 8
- #else
- #define MMQ_X_Q3_K 4
- #define MMQ_Y_Q3_K 32
- #define NWARPS_Q3_K 4
- #endif
- template <bool need_check> static __global__ void
- #if defined(USE_ROCM)
- __launch_bounds__(WARP_SIZE*NWARPS_Q3_K, 2)
- #endif
- mul_mat_q3_K(
- const void * __restrict__ vx, const void * __restrict__ vy, half * __restrict__ dst,
- const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
- const int mmq_x = MMQ_X_Q3_K;
- const int mmq_y = MMQ_Y_Q3_K;
- const int nwarps = NWARPS_Q3_K;
- mul_mat_q<QK_K, QR3_K, QI3_K, false, block_q3_K, mmq_x, mmq_y, nwarps, allocate_tiles_q3_K<mmq_y>,
- load_tiles_q3_K<mmq_y, nwarps, need_check>, VDR_Q3_K_Q8_1_MMQ, vec_dot_q3_K_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
- static void ggml_mul_mat_q3_K_q8_1_cuda(
- const void * vx, const void * vy, half * dst, const int ncols_x, const int nrows_x,
- const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
- const int mmq_x = MMQ_X_Q3_K;
- const int mmq_y = MMQ_Y_Q3_K;
- const int nwarps = NWARPS_Q3_K;
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q3_K<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q3_K<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
- }
- #if defined(USE_ROCM)
- #define MMQ_X_Q4_K 64
- #define MMQ_Y_Q4_K 128
- #define NWARPS_Q4_K 8
- #else
- #define MMQ_X_Q4_K 4
- #define MMQ_Y_Q4_K 32
- #define NWARPS_Q4_K 4
- #endif
- template <bool need_check> static __global__ void
- #if defined(USE_ROCM)
- __launch_bounds__(WARP_SIZE*NWARPS_Q4_K, 2)
- #endif
- mul_mat_q4_K(
- const void * __restrict__ vx, const void * __restrict__ vy, half * __restrict__ dst,
- const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
- const int mmq_x = MMQ_X_Q4_K;
- const int mmq_y = MMQ_Y_Q4_K;
- const int nwarps = NWARPS_Q4_K;
- mul_mat_q<QK_K, QR4_K, QI4_K, true, block_q4_K, mmq_x, mmq_y, nwarps, allocate_tiles_q4_K<mmq_y>,
- load_tiles_q4_K<mmq_y, nwarps, need_check>, VDR_Q4_K_Q8_1_MMQ, vec_dot_q4_K_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
- static void ggml_mul_mat_q4_K_q8_1_cuda(
- const void * vx, const void * vy, half * dst, const int ncols_x, const int nrows_x,
- const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
- const int mmq_x = MMQ_X_Q4_K;
- const int mmq_y = MMQ_Y_Q4_K;
- const int nwarps = NWARPS_Q4_K;
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q4_K<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q4_K<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
- }
- #if defined(USE_ROCM)
- #define MMQ_X_Q5_K 64
- #define MMQ_Y_Q5_K 128
- #define NWARPS_Q5_K 8
- #else
- #define MMQ_X_Q5_K 4
- #define MMQ_Y_Q5_K 32
- #define NWARPS_Q5_K 4
- #endif
- template <bool need_check> static __global__ void
- #if defined(USE_ROCM)
- __launch_bounds__(WARP_SIZE*NWARPS_Q5_K, 2)
- #endif
- mul_mat_q5_K(
- const void * __restrict__ vx, const void * __restrict__ vy, half * __restrict__ dst,
- const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
- const int mmq_x = MMQ_X_Q5_K;
- const int mmq_y = MMQ_Y_Q5_K;
- const int nwarps = NWARPS_Q5_K;
- mul_mat_q<QK_K, QR5_K, QI5_K, true, block_q5_K, mmq_x, mmq_y, nwarps, allocate_tiles_q5_K<mmq_y>,
- load_tiles_q5_K<mmq_y, nwarps, need_check>, VDR_Q5_K_Q8_1_MMQ, vec_dot_q5_K_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
- static void ggml_mul_mat_q5_K_q8_1_cuda(
- const void * vx, const void * vy, half * dst, const int ncols_x, const int nrows_x,
- const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
- const int mmq_x = MMQ_X_Q5_K;
- const int mmq_y = MMQ_Y_Q5_K;
- const int nwarps = NWARPS_Q5_K;
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q5_K<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q5_K<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
- }
- #if defined(USE_ROCM)
- #define MMQ_X_Q6_K 64
- #define MMQ_Y_Q6_K 128
- #define NWARPS_Q6_K 8
- #else
- #define MMQ_X_Q6_K 4
- #define MMQ_Y_Q6_K 32
- #define NWARPS_Q6_K 4
- #endif
- template <bool need_check> static __global__ void
- #if defined(USE_ROCM)
- __launch_bounds__(WARP_SIZE*NWARPS_Q6_K, 2)
- #endif
- mul_mat_q6_K(
- const void * __restrict__ vx, const void * __restrict__ vy, half * __restrict__ dst,
- const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
- const int mmq_x = MMQ_X_Q6_K;
- const int mmq_y = MMQ_Y_Q6_K;
- const int nwarps = NWARPS_Q6_K;
- mul_mat_q<QK_K, QR6_K, QI6_K, false, block_q6_K, mmq_x, mmq_y, nwarps, allocate_tiles_q6_K<mmq_y>,
- load_tiles_q6_K<mmq_y, nwarps, need_check>, VDR_Q6_K_Q8_1_MMQ, vec_dot_q6_K_q8_1_mul_mat>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
- static void ggml_mul_mat_q6_K_q8_1_cuda(
- const void * vx, const void * vy, half * dst, const int ncols_x, const int nrows_x,
- const int ncols_y, const int nrows_y, const int nrows_dst, cudaStream_t stream) {
- const int mmq_x = MMQ_X_Q6_K;
- const int mmq_y = MMQ_Y_Q6_K;
- const int nwarps = NWARPS_Q6_K;
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q6_K<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q6_K<need_check><<<block_nums, block_dims, 0, stream>>>
- (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
- }
- torch::Tensor ggml_dequantize(
- torch::Tensor W, // quant weight
- int8_t type,
- int64_t m,
- int64_t n
- ){
- const at::cuda::OptionalCUDAGuard device_guard(device_of(W));
- auto options = torch::TensorOptions().dtype(torch::kFloat16).device(W.device());
- at::Tensor DW = torch::empty({m, n}, options);
- cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
- const to_fp16_cuda_t to_fp16_cuda = ggml_get_to_fp16_cuda(type);
- to_fp16_cuda(
- (void*)W.data_ptr(), (half*)DW.data_ptr(), m * n, stream
- );
- return DW;
- }
- // Newer quantizations don't implement ggml_mul_mat_vec; they only use ggml_mul_mat_vec_a8
- torch::Tensor ggml_mul_mat_vec(
- torch::Tensor W, // quant weight
- torch::Tensor X, // input
- int8_t type,
- int64_t row
- ){
- size_t col = X.sizes()[1];
- const at::cuda::OptionalCUDAGuard device_guard(device_of(X));
- auto options = torch::TensorOptions().dtype(torch::kFloat16).device(W.device());
- at::Tensor Y = torch::empty({1, row}, options);
- cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
- switch (type) {
- case 2:
- dequantize_mul_mat_vec_q4_0_cuda((void*)W.data_ptr(), (half*)X.data_ptr(), (half*)Y.data_ptr(), col, row, stream);
- break;
- case 3:
- dequantize_mul_mat_vec_q4_1_cuda((void*)W.data_ptr(), (half*)X.data_ptr(), (half*)Y.data_ptr(), col, row, stream);
- break;
- case 6:
- dequantize_mul_mat_vec_q5_0_cuda((void*)W.data_ptr(), (half*)X.data_ptr(), (half*)Y.data_ptr(), col, row, stream);
- break;
- case 7:
- dequantize_mul_mat_vec_q5_1_cuda((void*)W.data_ptr(), (half*)X.data_ptr(), (half*)Y.data_ptr(), col, row, stream);
- break;
- case 8:
- dequantize_mul_mat_vec_q8_0_cuda((void*)W.data_ptr(), (half*)X.data_ptr(), (half*)Y.data_ptr(), col, row, stream);
- break;
- case 10:
- dequantize_mul_mat_vec_q2_K_cuda((void*)W.data_ptr(), (half*)X.data_ptr(), (half*)Y.data_ptr(), col, row, stream);
- break;
- case 11:
- dequantize_mul_mat_vec_q3_K_cuda((void*)W.data_ptr(), (half*)X.data_ptr(), (half*)Y.data_ptr(), col, row, stream);
- break;
- case 12:
- dequantize_mul_mat_vec_q4_K_cuda((void*)W.data_ptr(), (half*)X.data_ptr(), (half*)Y.data_ptr(), col, row, stream);
- break;
- case 13:
- dequantize_mul_mat_vec_q5_K_cuda((void*)W.data_ptr(), (half*)X.data_ptr(), (half*)Y.data_ptr(), col, row, stream);
- break;
- case 14:
- dequantize_mul_mat_vec_q6_K_cuda((void*)W.data_ptr(), (half*)X.data_ptr(), (half*)Y.data_ptr(), col, row, stream);
- break;
- case 16:
- dequantize_mul_mat_vec_iq2_xxs_cuda((void*)W.data_ptr(), (half*)X.data_ptr(), (half*)Y.data_ptr(), col, row, stream);
- break;
- case 17:
- dequantize_mul_mat_vec_iq2_xs_cuda((void*)W.data_ptr(), (half*)X.data_ptr(), (half*)Y.data_ptr(), col, row, stream);
- break;
- }
- return Y;
- }
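- // The integer `type` codes dispatched on above and below follow ggml's
- // ggml_type enumeration (values taken from ggml.h, which is not part of this
- // file): 2 = Q4_0, 3 = Q4_1, 6 = Q5_0, 7 = Q5_1, 8 = Q8_0, 10 = Q2_K,
- // 11 = Q3_K, 12 = Q4_K, 13 = Q5_K, 14 = Q6_K, 16 = IQ2_XXS, 17 = IQ2_XS,
- // 18 = IQ3_XXS, 19 = IQ1_S, 20 = IQ4_NL, 21 = IQ3_S, 22 = IQ2_S, 23 = IQ4_XS.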
- torch::Tensor ggml_mul_mat_vec_a8(
- torch::Tensor W, // quant weight
- torch::Tensor X, // input
- int8_t type,
- int64_t row
- ){
- int col = X.sizes()[1];
- const int padded = (col + 512 - 1) / 512 * 512;
- const at::cuda::OptionalCUDAGuard device_guard(device_of(X));
- auto options = torch::TensorOptions().dtype(torch::kFloat16).device(W.device());
- at::Tensor Y = torch::empty({1, row}, options);
- cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
- options = torch::TensorOptions().dtype(torch::kInt32).device(W.device());
- at::Tensor quant_X = torch::empty({1, padded / 32 * 9}, options);
- quantize_row_q8_1_cuda((half*)X.data_ptr(), (void*)quant_X.data_ptr(), col, 1, stream);
- switch (type) {
- case 2:
- mul_mat_vec_q4_0_q8_1_cuda((void*)W.data_ptr(), (void*)quant_X.data_ptr(), (half*)Y.data_ptr(), col, row, stream);
- break;
- case 3:
- mul_mat_vec_q4_1_q8_1_cuda((void*)W.data_ptr(), (void*)quant_X.data_ptr(), (half*)Y.data_ptr(), col, row, stream);
- break;
- case 6:
- mul_mat_vec_q5_0_q8_1_cuda((void*)W.data_ptr(), (void*)quant_X.data_ptr(), (half*)Y.data_ptr(), col, row, stream);
- break;
- case 7:
- mul_mat_vec_q5_1_q8_1_cuda((void*)W.data_ptr(), (void*)quant_X.data_ptr(), (half*)Y.data_ptr(), col, row, stream);
- break;
- case 8:
- mul_mat_vec_q8_0_q8_1_cuda((void*)W.data_ptr(), (void*)quant_X.data_ptr(), (half*)Y.data_ptr(), col, row, stream);
- break;
- case 10:
- mul_mat_vec_q2_K_q8_1_cuda((void*)W.data_ptr(), (void*)quant_X.data_ptr(), (half*)Y.data_ptr(), col, row, stream);
- break;
- case 11:
- mul_mat_vec_q3_K_q8_1_cuda((void*)W.data_ptr(), (void*)quant_X.data_ptr(), (half*)Y.data_ptr(), col, row, stream);
- break;
- case 12:
- mul_mat_vec_q4_K_q8_1_cuda((void*)W.data_ptr(), (void*)quant_X.data_ptr(), (half*)Y.data_ptr(), col, row, stream);
- break;
- case 13:
- mul_mat_vec_q5_K_q8_1_cuda((void*)W.data_ptr(), (void*)quant_X.data_ptr(), (half*)Y.data_ptr(), col, row, stream);
- break;
- case 14:
- mul_mat_vec_q6_K_q8_1_cuda((void*)W.data_ptr(), (void*)quant_X.data_ptr(), (half*)Y.data_ptr(), col, row, stream);
- break;
- case 16:
- mul_mat_vec_iq2_xxs_q8_1_cuda((void*)W.data_ptr(), (void*)quant_X.data_ptr(), (half*)Y.data_ptr(), col, row, stream);
- break;
- case 17:
- mul_mat_vec_iq2_xs_q8_1_cuda((void*)W.data_ptr(), (void*)quant_X.data_ptr(), (half*)Y.data_ptr(), col, row, stream);
- break;
- case 18:
- mul_mat_vec_iq3_xxs_q8_1_cuda((void*)W.data_ptr(), (void*)quant_X.data_ptr(), (half*)Y.data_ptr(), col, row, stream);
- break;
- case 19:
- mul_mat_vec_iq1_s_q8_1_cuda((void*)W.data_ptr(), (void*)quant_X.data_ptr(), (half*)Y.data_ptr(), col, row, stream);
- break;
- case 20:
- mul_mat_vec_iq4_nl_q8_1_cuda((void*)W.data_ptr(), (void*)quant_X.data_ptr(), (half*)Y.data_ptr(), col, row, stream);
- break;
- case 21:
- mul_mat_vec_iq3_s_q8_1_cuda((void*)W.data_ptr(), (void*)quant_X.data_ptr(), (half*)Y.data_ptr(), col, row, stream);
- break;
- case 22:
- mul_mat_vec_iq2_s_q8_1_cuda((void*)W.data_ptr(), (void*)quant_X.data_ptr(), (half*)Y.data_ptr(), col, row, stream);
- break;
- case 23:
- mul_mat_vec_iq4_xs_q8_1_cuda((void*)W.data_ptr(), (void*)quant_X.data_ptr(), (half*)Y.data_ptr(), col, row, stream);
- break;
- default:
- TORCH_CHECK(false, "ggml_mul_mat_vec_a8: unsupported quantization type ", static_cast<int>(type));
- }
- return Y;
- }
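The quant_X sizing falls out of the q8_1 block layout: every 32 activations become one block of 32 int8 quants plus two fp16 scale fields, 36 bytes in all, i.e. 9 int32 words, and col is first rounded up to a multiple of 512 so the kernels only ever see whole blocks. A sketch of that arithmetic, assuming the upstream block layout (field names vary between ggml versions):

#include <cstdint>

// q8_1 block: two fp16 scale fields plus 32 quantized values, 36 bytes.
struct block_q8_1 {
  uint16_t d;       // fp16 scale (delta)
  uint16_t s;       // fp16 d * sum(qs), cached for the dot-product kernels
  int8_t   qs[32];  // quantized activations
};
static_assert(sizeof(block_q8_1) == 36, "9 int32 words per 32 inputs");

// int32 words per quantized activation row; matches `padded / 32 * 9` above.
inline int64_t q8_1_row_words(int64_t col) {
  const int64_t padded = (col + 511) / 512 * 512;  // round up to 512
  return padded / 32 * (sizeof(block_q8_1) / sizeof(int32_t));
}

For col = 4096 this gives 4096 / 32 * 9 = 1152 int32 words per row, exactly the quant_X allocation above.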
- torch::Tensor ggml_mul_mat_a8(
- torch::Tensor W, // quantized weight
- torch::Tensor X, // input
- int8_t type,
- int64_t row
- ) {
- int col = X.sizes()[1];
- int padded = (col + 512 - 1) / 512 * 512;
- int batch = X.sizes()[0];
- const at::cuda::OptionalCUDAGuard device_guard(device_of(X));
- auto options = torch::TensorOptions().dtype(torch::kFloat16).device(W.device());
- at::Tensor Y = torch::empty({batch, row}, options);
- cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
- options = torch::TensorOptions().dtype(torch::kInt32).device(W.device());
- at::Tensor quant_X = torch::empty({batch, padded / 32 * 9}, options);
- quantize_row_q8_1_cuda((half*)X.data_ptr(), (void*)quant_X.data_ptr(), col, batch, stream);
- switch (type) {
- case 2:
- ggml_mul_mat_q4_0_q8_1_cuda((void*)W.data_ptr(), (void*)quant_X.data_ptr(), (half*)Y.data_ptr(), col, row, batch, padded, row, stream);
- break;
- case 3:
- ggml_mul_mat_q4_1_q8_1_cuda((void*)W.data_ptr(), (void*)quant_X.data_ptr(), (half*)Y.data_ptr(), col, row, batch, padded, row, stream);
- break;
- case 6:
- ggml_mul_mat_q5_0_q8_1_cuda((void*)W.data_ptr(), (void*)quant_X.data_ptr(), (half*)Y.data_ptr(), col, row, batch, padded, row, stream);
- break;
- case 7:
- ggml_mul_mat_q5_1_q8_1_cuda((void*)W.data_ptr(), (void*)quant_X.data_ptr(), (half*)Y.data_ptr(), col, row, batch, padded, row, stream);
- break;
- case 8:
- ggml_mul_mat_q8_0_q8_1_cuda((void*)W.data_ptr(), (void*)quant_X.data_ptr(), (half*)Y.data_ptr(), col, row, batch, padded, row, stream);
- break;
- case 10:
- ggml_mul_mat_q2_K_q8_1_cuda((void*)W.data_ptr(), (void*)quant_X.data_ptr(), (half*)Y.data_ptr(), col, row, batch, padded, row, stream);
- break;
- case 11:
- ggml_mul_mat_q3_K_q8_1_cuda((void*)W.data_ptr(), (void*)quant_X.data_ptr(), (half*)Y.data_ptr(), col, row, batch, padded, row, stream);
- break;
- case 12:
- ggml_mul_mat_q4_K_q8_1_cuda((void*)W.data_ptr(), (void*)quant_X.data_ptr(), (half*)Y.data_ptr(), col, row, batch, padded, row, stream);
- break;
- case 13:
- ggml_mul_mat_q5_K_q8_1_cuda((void*)W.data_ptr(), (void*)quant_X.data_ptr(), (half*)Y.data_ptr(), col, row, batch, padded, row, stream);
- break;
- case 14:
- ggml_mul_mat_q6_K_q8_1_cuda((void*)W.data_ptr(), (void*)quant_X.data_ptr(), (half*)Y.data_ptr(), col, row, batch, padded, row, stream);
- break;
- default:
- TORCH_CHECK(false, "ggml_mul_mat_a8: unsupported quantization type ", static_cast<int>(type));
- }
- return Y;
- }
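Taken together, ggml_mul_mat_vec_a8 and ggml_mul_mat_a8 are the single-row and batched versions of the same pipeline: quantize the fp16 activations to q8_1, then dispatch a fused quantized matmul per weight format. A hedged sketch of how a caller might choose between them (the wrapper name and shape conventions here are assumptions, not part of these bindings):

// Hypothetical dispatch helper: W is the raw quantized GGUF weight,
// X is a [batch, col] fp16 activation tensor on the same device, and
// `row` is the number of output features.
torch::Tensor gguf_matmul(torch::Tensor W, torch::Tensor X,
                          int8_t type, int64_t row) {
  if (X.size(0) == 1) {
    // Decode-time fast path: one token, quantized GEMV kernels.
    return ggml_mul_mat_vec_a8(W, X, type, row);
  }
  // Prefill path: batched GEMM kernels; note the switch above only
  // covers the classic and k-quant formats (types 2-14), not IQ types.
  return ggml_mul_mat_a8(W, X, type, row);
}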