# test_flash_attn.py
import math

import pytest
import torch
import torch.nn.functional as F
from einops import rearrange, repeat

from flash_attn import (
    flash_attn_func,
    flash_attn_kvpacked_func,
    flash_attn_qkvpacked_func,
    flash_attn_varlen_func,
    flash_attn_varlen_kvpacked_func,
    flash_attn_varlen_qkvpacked_func,
    flash_attn_with_kvcache,
)
from flash_attn.bert_padding import pad_input, unpad_input
from flash_attn.flash_attn_interface import _get_block_size_n
from flash_attn.layers.rotary import apply_rotary_emb

MAX_HEADDIM_SM8x = 192

is_sm75 = torch.cuda.get_device_capability("cuda") == (7, 5)
is_sm8x = torch.cuda.get_device_capability("cuda")[0] == 8
is_sm80 = torch.cuda.get_device_capability("cuda") == (8, 0)
is_sm90 = torch.cuda.get_device_capability("cuda") == (9, 0)


def attn_bias_from_alibi_slopes(
    slopes, seqlen_q, seqlen_k, query_padding_mask=None, key_padding_mask=None, causal=False, key_leftpad=None
):
    batch, nheads = slopes.shape
    device = slopes.device
    slopes = rearrange(slopes, "b h -> b h 1 1")
    if causal:
        return torch.arange(-seqlen_k + 1, 1, device=device, dtype=torch.float32) * slopes
    else:
        row_idx = rearrange(torch.arange(seqlen_q, device=device, dtype=torch.long), "s -> s 1")
        col_idx = torch.arange(seqlen_k, device=device, dtype=torch.long)
        if key_leftpad is not None:
            key_leftpad = rearrange(key_leftpad, "b -> b 1 1 1")
            col_idx = repeat(col_idx, "s -> b 1 1 s", b=key_leftpad.shape[0])
            col_idx = torch.where(col_idx >= key_leftpad, col_idx - key_leftpad, 2**32)
        sk = (
            seqlen_k
            if key_padding_mask is None
            else rearrange(key_padding_mask.sum(-1), "b -> b 1 1 1")
        )
        sq = (
            seqlen_q
            if query_padding_mask is None
            else rearrange(query_padding_mask.sum(-1), "b -> b 1 1 1")
        )
        relative_pos = torch.abs(row_idx + sk - sq - col_idx)
        return -slopes * relative_pos.to(dtype=slopes.dtype)
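

# Illustrative sketch (added for exposition, not part of the original test suite): how the
# ALiBi helper above is typically used. The `_example_*` names, shapes, and slope scaling
# are illustrative assumptions only; a CUDA device is assumed.
def _example_alibi_bias(device="cuda"):
    # Per-head slopes, shape (batch, nheads); the 0.3 scaling mirrors the tests below.
    slopes = torch.rand(2, 3, device=device, dtype=torch.float32) * 0.3
    # causal=True returns a per-key linear penalty broadcastable to (batch, nheads, 1, seqlen_k).
    causal_bias = attn_bias_from_alibi_slopes(slopes, seqlen_q=4, seqlen_k=4, causal=True)
    # causal=False returns -slope * |relative position|, shape (batch, nheads, seqlen_q, seqlen_k).
    full_bias = attn_bias_from_alibi_slopes(slopes, seqlen_q=4, seqlen_k=4, causal=False)
    return causal_bias, full_bias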


def generate_random_padding_mask(max_seqlen, batch_size, device, mode="random"):
    assert mode in ["full", "random", "third"]
    if mode == "full":
        lengths = torch.full((batch_size, 1), max_seqlen, device=device, dtype=torch.int32)
    elif mode == "random":
        lengths = torch.randint(
            max(1, max_seqlen - 20), max_seqlen + 1, (batch_size, 1), device=device
        )
    elif mode == "third":
        lengths = torch.randint(max_seqlen // 3, max_seqlen + 1, (batch_size, 1), device=device)
    padding_mask = (
        repeat(torch.arange(max_seqlen, device=device), "s -> b s", b=batch_size) < lengths
    )
    return padding_mask
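

# Illustrative sketch (added for exposition): the mask marks real tokens with True and
# padding with False, with per-sequence lengths drawn at random. The `_example_*` helper
# and its sizes are assumptions for illustration only.
def _example_padding_mask(device="cuda"):
    mask = generate_random_padding_mask(max_seqlen=8, batch_size=2, device=device, mode="random")
    lengths = mask.sum(dim=-1)  # number of real (unpadded) tokens per sequence
    return mask, lengths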


def generate_qkv(
    q, k, v, query_padding_mask=None, key_padding_mask=None, kvpacked=False, qkvpacked=False
):
    """
    Arguments:
        q: (batch_size, seqlen_q, nheads, d)
        k: (batch_size, seqlen_k, nheads_k, d)
        v: (batch_size, seqlen_k, nheads_k, d)
        query_padding_mask: (batch_size, seqlen), bool
        key_padding_mask: (batch_size, seqlen), bool
    """
    assert not (kvpacked and qkvpacked)
    batch_size, seqlen_q, nheads, d = q.shape
    _, seqlen_k, nheads_k, _ = k.shape
    assert k.shape == (batch_size, seqlen_k, nheads_k, d)
    assert v.shape == (batch_size, seqlen_k, nheads_k, d)

    if query_padding_mask is not None:
        q_unpad, indices_q, cu_seqlens_q, max_seqlen_q, _ = unpad_input(q, query_padding_mask)
        output_pad_fn = lambda output_unpad: pad_input(
            output_unpad, indices_q, batch_size, seqlen_q
        )
    else:
        q_unpad = rearrange(q, "b s h d -> (b s) h d")
        cu_seqlens_q = torch.arange(
            0, (batch_size + 1) * seqlen_q, step=seqlen_q, dtype=torch.int32, device=q_unpad.device
        )
        max_seqlen_q = seqlen_q
        output_pad_fn = lambda output_unpad: rearrange(
            output_unpad, "(b s) h d -> b s h d", b=batch_size
        )

    if key_padding_mask is not None:
        k_unpad, indices_k, cu_seqlens_k, max_seqlen_k, _ = unpad_input(k, key_padding_mask)
        v_unpad, _, _, _, _ = unpad_input(v, key_padding_mask)
    else:
        k_unpad = rearrange(k, "b s h d -> (b s) h d")
        v_unpad = rearrange(v, "b s h d -> (b s) h d")
        cu_seqlens_k = torch.arange(
            0, (batch_size + 1) * seqlen_k, step=seqlen_k, dtype=torch.int32, device=k_unpad.device
        )
        max_seqlen_k = seqlen_k

    if qkvpacked:
        assert (query_padding_mask == key_padding_mask).all()
        assert nheads == nheads_k
        qkv_unpad = torch.stack([q_unpad, k_unpad, v_unpad], dim=1)
        qkv = torch.stack([q, k, v], dim=2)
        if query_padding_mask is not None:
            dqkv_pad_fn = lambda dqkv_unpad: pad_input(dqkv_unpad, indices_q, batch_size, seqlen_q)
        else:
            dqkv_pad_fn = lambda dqkv_unpad: rearrange(
                dqkv_unpad, "(b s) t h d -> b s t h d", b=batch_size
            )
        return (
            qkv_unpad.detach().requires_grad_(),
            cu_seqlens_q,
            max_seqlen_q,
            qkv.detach().requires_grad_(),
            output_pad_fn,
            dqkv_pad_fn,
        )
    elif kvpacked:
        kv_unpad = torch.stack([k_unpad, v_unpad], dim=1)
        kv = torch.stack([k, v], dim=2)
        dq_pad_fn = output_pad_fn
        if key_padding_mask is not None:
            dkv_pad_fn = lambda dkv_unpad: pad_input(dkv_unpad, indices_k, batch_size, seqlen_k)
        else:
            dkv_pad_fn = lambda dkv_unpad: rearrange(
                dkv_unpad, "(b s) t h d -> b s t h d", b=batch_size
            )
        return (
            q_unpad.detach().requires_grad_(),
            kv_unpad.detach().requires_grad_(),
            cu_seqlens_q,
            cu_seqlens_k,
            max_seqlen_q,
            max_seqlen_k,
            q.detach().requires_grad_(),
            kv.detach().requires_grad_(),
            output_pad_fn,
            dq_pad_fn,
            dkv_pad_fn,
        )
    else:
        dq_pad_fn = output_pad_fn
        if key_padding_mask is not None:
            dk_pad_fn = lambda dk_unpad: pad_input(dk_unpad, indices_k, batch_size, seqlen_k)
        else:
            dk_pad_fn = lambda dk_unpad: rearrange(dk_unpad, "(b s) h d -> b s h d", b=batch_size)
        return (
            q_unpad.detach().requires_grad_(),
            k_unpad.detach().requires_grad_(),
            v_unpad.detach().requires_grad_(),
            cu_seqlens_q,
            cu_seqlens_k,
            max_seqlen_q,
            max_seqlen_k,
            q.detach().requires_grad_(),
            k.detach().requires_grad_(),
            v.detach().requires_grad_(),
            output_pad_fn,
            dq_pad_fn,
            dk_pad_fn,
        )
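

# Illustrative sketch (added for exposition): generate_qkv turns padded (b, s, h, d) tensors
# into the unpadded "varlen" layout plus cu_seqlens offsets that the flash_attn_varlen_*
# functions expect, and returns pad functions to map outputs and gradients back to the
# padded layout. The `_example_*` helper and its sizes are assumptions for illustration only.
def _example_generate_qkv(device="cuda", dtype=torch.float16):
    batch_size, seqlen, nheads, d = 2, 8, 3, 32
    q = torch.randn(batch_size, seqlen, nheads, d, device=device, dtype=dtype)
    k = torch.randn(batch_size, seqlen, nheads, d, device=device, dtype=dtype)
    v = torch.randn(batch_size, seqlen, nheads, d, device=device, dtype=dtype)
    key_padding_mask = generate_random_padding_mask(seqlen, batch_size, device, mode="random")
    (q_unpad, k_unpad, v_unpad, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k,
     q_pad, k_pad, v_pad, output_pad_fn, dq_pad_fn, dk_pad_fn) = generate_qkv(
        q, k, v, key_padding_mask, key_padding_mask
    )
    # q_unpad has shape (total_tokens, nheads, d); cu_seqlens_q has shape (batch_size + 1,).
    return q_unpad, cu_seqlens_q, output_pad_fn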


def construct_local_mask(
    seqlen_q,
    seqlen_k,
    window_size=(-1, -1),  # -1 means infinite window size
    query_padding_mask=None,
    key_padding_mask=None,
    device=None,
    key_leftpad=None,
):
    row_idx = rearrange(torch.arange(seqlen_q, device=device, dtype=torch.long), "s -> s 1")
    col_idx = torch.arange(seqlen_k, device=device, dtype=torch.long)
    if key_leftpad is not None:
        key_leftpad = rearrange(key_leftpad, "b -> b 1 1 1")
        col_idx = repeat(col_idx, "s -> b 1 1 s", b=key_leftpad.shape[0])
        col_idx = torch.where(col_idx >= key_leftpad, col_idx - key_leftpad, 2**32)
    sk = (
        seqlen_k
        if key_padding_mask is None
        else rearrange(key_padding_mask.sum(-1), "b -> b 1 1 1")
    )
    sq = (
        seqlen_q
        if query_padding_mask is None
        else rearrange(query_padding_mask.sum(-1), "b -> b 1 1 1")
    )
    if window_size[0] < 0:
        return col_idx > row_idx + sk - sq + window_size[1]
    else:
        sk = torch.full_like(col_idx, seqlen_k) if key_padding_mask is None else sk
        return torch.logical_or(
            col_idx > torch.minimum(row_idx + sk - sq + window_size[1], sk),
            col_idx < row_idx + sk - sq - window_size[0],
        )
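

# Illustrative sketch (added for exposition): a sliding-window mask with 2 keys of left
# context and no right context for a 5x5 attention matrix. True marks positions that are
# masked out (disallowed), matching how the reference implementation uses it below.
# The `_example_*` helper and its sizes are assumptions for illustration only.
def _example_local_mask(device="cuda"):
    mask = construct_local_mask(seqlen_q=5, seqlen_k=5, window_size=(2, 0), device=device)
    # Row i allows keys in [i - 2, i]; every other position is True (masked out).
    return mask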


def attention_ref(
    q,
    k,
    v,
    query_padding_mask=None,
    key_padding_mask=None,
    attn_bias=None,
    dropout_p=0.0,
    dropout_mask=None,
    causal=False,
    window_size=(-1, -1),  # -1 means infinite window size
    softcap=0.0,
    upcast=True,
    reorder_ops=False,
    key_leftpad=None,
):
    """
    Arguments:
        q: (batch_size, seqlen_q, nheads, head_dim)
        k: (batch_size, seqlen_k, nheads_k, head_dim)
        v: (batch_size, seqlen_k, nheads_k, head_dim)
        query_padding_mask: (batch_size, seqlen_q)
        key_padding_mask: (batch_size, seqlen_k)
        attn_bias: broadcastable to (batch_size, nheads, seqlen_q, seqlen_k)
        dropout_p: float
        dropout_mask: (batch_size, nheads, seqlen_q, seqlen_k)
        causal: whether to apply causal masking
        window_size: (int, int), left and right window size
        upcast: whether to cast all inputs to fp32, do all computation in fp32, then cast
            output back to fp16/bf16.
        reorder_ops: whether to change the order of operations (scaling k instead of scaling q, etc.)
            without changing the math. This is to estimate the numerical error from operation
            reordering.
    Output:
        output: (batch_size, seqlen_q, nheads, head_dim)
        attention: (batch_size, nheads, seqlen_q, seqlen_k), softmax after dropout
    """
    if causal:
        window_size = (window_size[0], 0)
    dtype_og = q.dtype
    if upcast:
        q, k, v = q.float(), k.float(), v.float()
    seqlen_q, seqlen_k = q.shape[1], k.shape[1]
    k = repeat(k, "b s h d -> b s (h g) d", g=q.shape[2] // k.shape[2])
    v = repeat(v, "b s h d -> b s (h g) d", g=q.shape[2] // v.shape[2])
    d = q.shape[-1]
    if not reorder_ops:
        scores = torch.einsum("bthd,bshd->bhts", q / math.sqrt(d), k)
    else:
        scores = torch.einsum("bthd,bshd->bhts", q, k / math.sqrt(d))
    if softcap > 0:
        scores = scores / softcap
        scores = scores.tanh()
        scores = scores * softcap
    if key_padding_mask is not None:
        scores.masked_fill_(rearrange(~key_padding_mask, "b s -> b 1 1 s"), float("-inf"))
    if window_size[0] >= 0 or window_size[1] >= 0:
        local_mask = construct_local_mask(
            seqlen_q,
            seqlen_k,
            window_size,
            query_padding_mask,
            key_padding_mask,
            q.device,
            key_leftpad=key_leftpad,
        )
        scores.masked_fill_(local_mask, float("-inf"))
    if attn_bias is not None:
        scores = scores + attn_bias
    attention = torch.softmax(scores, dim=-1).to(v.dtype)
    # Some rows might be completely masked out so we fill them with zero instead of NaN
    if window_size[0] >= 0 or window_size[1] >= 0:
        attention = attention.masked_fill(torch.all(local_mask, dim=-1, keepdim=True), 0.0)
    # We want to mask here so that the attention matrix doesn't have any NaNs
    # Otherwise we'll get NaN in dV
    if query_padding_mask is not None:
        attention = attention.masked_fill(rearrange(~query_padding_mask, "b s -> b 1 s 1"), 0.0)
    dropout_scaling = 1.0 / (1 - dropout_p)
    # attention_drop = attention.masked_fill(~dropout_mask, 0.0) * dropout_scaling
    # output = torch.einsum('bhts,bshd->bthd', attention_drop , v)
    if dropout_mask is not None:
        attention_drop = attention.masked_fill(~dropout_mask, 0.0)
    else:
        attention_drop = attention
    output = torch.einsum("bhts,bshd->bthd", attention_drop, v * dropout_scaling)
    if query_padding_mask is not None:
        output.masked_fill_(rearrange(~query_padding_mask, "b s -> b s 1 1"), 0.0)
    return output.to(dtype=dtype_og), attention.to(dtype=dtype_og)
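

# Illustrative sanity sketch (added for exposition, assuming PyTorch 2.x provides
# F.scaled_dot_product_attention): with no padding, bias, dropout, or windowing,
# attention_ref should agree with SDPA up to floating-point error. SDPA uses the
# (batch, nheads, seqlen, d) layout, hence the rearranges. The `_example_*` name and
# the chosen sizes are assumptions for illustration only.
def _example_attention_ref_vs_sdpa(device="cuda", dtype=torch.float16):
    batch_size, seqlen, nheads, d = 2, 16, 3, 32
    q = torch.randn(batch_size, seqlen, nheads, d, device=device, dtype=dtype)
    k = torch.randn(batch_size, seqlen, nheads, d, device=device, dtype=dtype)
    v = torch.randn(batch_size, seqlen, nheads, d, device=device, dtype=dtype)
    out_ref, _ = attention_ref(q, k, v, causal=True)
    out_sdpa = F.scaled_dot_product_attention(
        rearrange(q, "b s h d -> b h s d").float(),
        rearrange(k, "b s h d -> b h s d").float(),
        rearrange(v, "b s h d -> b h s d").float(),
        is_causal=True,
    )
    out_sdpa = rearrange(out_sdpa, "b h s d -> b s h d").to(dtype)
    return (out_ref - out_sdpa).abs().max()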


def attention_kvpacked_ref(
    q,
    kv,
    query_padding_mask=None,
    key_padding_mask=None,
    attn_bias=None,
    dropout_p=0.0,
    dropout_mask=None,
    causal=False,
    window_size=(-1, -1),  # -1 means infinite window size
    softcap=0.0,
    upcast=True,
    reorder_ops=False,
    key_leftpad=None,
):
    return attention_ref(
        q,
        kv[:, :, 0],
        kv[:, :, 1],
        query_padding_mask,
        key_padding_mask,
        attn_bias,
        dropout_p,
        dropout_mask,
        upcast=upcast,
        causal=causal,
        window_size=window_size,
        softcap=softcap,
        reorder_ops=reorder_ops,
        key_leftpad=key_leftpad,
    )


def attention_qkvpacked_ref(
    qkv,
    key_padding_mask=None,
    attn_bias=None,
    dropout_p=0.0,
    dropout_mask=None,
    causal=False,
    window_size=(-1, -1),  # -1 means infinite window size
    softcap=0.0,
    upcast=True,
    reorder_ops=False,
):
    return attention_ref(
        qkv[:, :, 0],
        qkv[:, :, 1],
        qkv[:, :, 2],
        key_padding_mask,
        key_padding_mask,
        attn_bias,
        dropout_p,
        dropout_mask,
        upcast=upcast,
        causal=causal,
        window_size=window_size,
        softcap=softcap,
        reorder_ops=reorder_ops,
    )


def generate_sparsity_mask(seqlen, sparsity=0.3):
    repeats = seqlen // 16 // 2
    # mask = torch.stack([torch.tensor([1, 0] * repeats, dtype=torch.bool, device='cuda'),
    #                     torch.tensor([0, 1] * repeats, dtype=torch.bool, device='cuda')], dim=-1)
    # mask = torch.stack([torch.tensor([1, 1] * repeats, dtype=torch.bool, device='cuda'),
    #                     torch.tensor([1, 1] * repeats, dtype=torch.bool, device='cuda')], dim=-1)
    # mask = torch.stack([torch.tensor([1, 1] * repeats, dtype=torch.bool, device='cuda')], dim=-1)
    # mask = torch.stack([torch.tensor([1, 0] * repeats, dtype=torch.bool, device='cuda')], dim=-1)
    nrow, ncol = seqlen // 16, seqlen // 256
    mask = torch.rand(nrow, ncol, device="cuda") < sparsity
    return mask


def attention_blocksparse_ref(qkv, blockmask, attn_mask, dropout_p, dropout_mask):
    """
    Arguments:
        qkv: (batch_size, seqlen, 3, nheads, head_dim)
        blockmask: (seqlen / 16, seqlen / 256)
        attn_mask: (batch_size, seqlen)
        dropout_p: float
        dropout_mask: (batch_size, nheads, seqlen, seqlen)
    Output:
        output: (batch_size, seqlen, nheads, head_dim)
        attention: softmax after dropout
    """
    q, k, v = qkv.float().unbind(dim=2)
    d = qkv.shape[-1]
    seqlen = qkv.shape[1]
    scores = torch.einsum("bthd,bshd->bhts", q / math.sqrt(d), k)
    scores.masked_fill_(rearrange(~attn_mask, "b s -> b 1 1 s"), float("-inf"))
    blockmask = repeat(blockmask, "s_16 s_256 -> (s_16 16) (s_256 256)")
    blockmask = blockmask[:seqlen, :seqlen]
    scores.masked_fill_(rearrange(~blockmask, "t s -> 1 1 t s"), float("-inf"))
    attention = torch.softmax(scores, dim=-1)
    attention = attention.masked_fill(rearrange(~attn_mask, "b s -> b 1 s 1"), 0.0)
    attention = attention.masked_fill_(rearrange(~blockmask, "t s -> 1 1 t s"), 0.0)
    attention_drop = attention.masked_fill(~dropout_mask, 0.0) / (1 - dropout_p)
    output = torch.einsum("bhts,bshd->bthd", attention_drop, v)
    output.masked_fill_(rearrange(~attn_mask, "b s -> b s 1 1"), 0)
    return output.to(dtype=qkv.dtype), attention.to(dtype=qkv.dtype)


def convert_flash_attn_S_to_softmax(
    S,
    seqlen_q,
    seqlen_k,
    query_padding_mask,
    key_padding_mask,
    head_dim,
    is_dropout,
    causal=False,
    window_size=(-1, -1),  # -1 means infinite window size
):
    """FlashAttention stores the S matrix in a different way.
    Arguments:
        S: (batch_size, nheads, seqlen_q_rounded, seqlen_k_rounded)
        query_padding_mask: (batch_size, seqlen_q_rounded)
        key_padding_mask: (batch_size, seqlen_k_rounded)
    """
    if causal:
        window_size = (window_size[0], 0)
    seqlen_q_rounded, seqlen_k_rounded = S.shape[-2:]
    S_converted = S
    if window_size[0] >= 0 or window_size[1] >= 0:
        local_mask = construct_local_mask(
            seqlen_q,
            seqlen_k,
            window_size,
            query_padding_mask,
            key_padding_mask,
            S.device,
        )
        local_mask = F.pad(
            local_mask,
            (0, seqlen_k_rounded - seqlen_k, 0, seqlen_q_rounded - seqlen_q),
            value=True,
        )
        S_converted = S_converted.masked_fill(local_mask, 0.0)

    # Need to zero out things not in attention_mask in case S was initialized with random values
    # and some of those values aren't overwritten.
    seqlen_q_og = (
        query_padding_mask.shape[-1] if query_padding_mask is not None else seqlen_q_rounded
    )
    if query_padding_mask is not None:
        query_padding_mask = F.pad(query_padding_mask, (0, seqlen_q_rounded - seqlen_q_og))
        S_converted = S_converted.masked_fill(rearrange(~query_padding_mask, "b s -> b 1 s 1"), 0.0)
    seqlen_k_og = key_padding_mask.shape[-1] if key_padding_mask is not None else seqlen_k
    if key_padding_mask is not None:
        key_padding_mask = F.pad(key_padding_mask, (0, seqlen_k_rounded - seqlen_k_og))
        S_converted = S_converted.masked_fill(rearrange(~key_padding_mask, "b s -> b 1 1 s"), 0.0)
    S_converted = F.pad(S_converted, (0, 0, 0, seqlen_q_og - seqlen_q_rounded))
    S_converted = F.pad(S_converted, (0, seqlen_k_og - seqlen_k_rounded))
    return S_converted[:, :, :seqlen_q, :seqlen_k]


def normalize_flash_attn_S(
    attn_unnorm,
    q,
    k,
    v,
    query_padding_mask=None,
    key_padding_mask=None,
    attn_bias=None,
    is_dropout=False,
    causal=False,
    window_size=(-1, -1),  # -1 means infinite window size
):
    """
    Arguments:
        q: (batch_size, seqlen_q, nheads, head_dim)
        k, v: (batch_size, seqlen_k, nheads, head_dim)
        key_padding_mask: (batch_size, seqlen_k)
        attn_bias: broadcastable to (batch_size, nheads, seqlen_q, seqlen_k)
    Output:
        attn_norm: (batch_size, nheads, seqlen_q, seqlen_k), the unnormalized attention
            rescaled by exp(rowmax - logsumexp) so that it matches the reference softmax.
    """
    if causal:
        window_size = (window_size[0], 0)
    q, k, v = q.float(), k.float(), v.float()
    _, seqlen_q, _, head_dim = q.shape
    seqlen_k = k.shape[1]
    scores = torch.einsum("bthd,bshd->bhts", q / math.sqrt(head_dim), k)
    if key_padding_mask is not None:
        scores.masked_fill_(rearrange(~key_padding_mask, "b s -> b 1 1 s"), float("-inf"))
    if window_size[0] >= 0 or window_size[1] >= 0:
        local_mask = construct_local_mask(
            seqlen_q,
            seqlen_k,
            window_size,
            query_padding_mask,
            key_padding_mask,
            q.device,
        )
        scores.masked_fill_(local_mask, float("-inf"))
    if attn_bias is not None:
        scores = scores + attn_bias.to(dtype=scores.dtype)
    block_size_n = _get_block_size_n(scores.device, head_dim, is_dropout, causal)
    scores_block = scores.split(block_size_n, dim=-1)
    lse_block = torch.stack([torch.logsumexp(s, dim=-1) for s in scores_block], dim=-1)
    lse = torch.logsumexp(lse_block, dim=-1)
    # lse could be -inf (i.e. all values in scores are -inf), and we want to set those to inf
    # so that when we do torch.exp(m - lse), we get 0.0 instead of NaN.
    lse[lse == float("-inf")] = float("inf")
    scores_max_block = torch.stack([torch.amax(s, dim=-1) for s in scores_block], dim=-1)
    cummax_block = torch.cummax(scores_max_block.flip(-1), dim=-1).values.flip(-1).unbind(dim=-1)
    attn_unnorm_block = attn_unnorm.split(block_size_n, dim=-1)
    attn_norm = torch.cat(
        [
            a * rearrange(torch.exp(m - lse), "b h s -> b h s 1")
            for a, m in zip(attn_unnorm_block, cummax_block)
        ],
        dim=-1,
    )
    if query_padding_mask is not None:
        attn_norm.masked_fill_(rearrange(~query_padding_mask, "b s -> b 1 s 1"), 0.0)
    return attn_norm.to(dtype=attn_unnorm.dtype)


def get_dropout_fraction(
    dropout_mask,
    query_padding_mask=None,
    key_padding_mask=None,
    causal=False,
    window_size=(-1, -1),  # -1 means infinite window size
):
    """
    dropout_mask: (batch_size, nheads, seqlen_q, seqlen_k), bool. True means keep, False means drop.
    query_padding_mask: (batch_size, seqlen_q)
    key_padding_mask: (batch_size, seqlen_k)
    """
    if causal:
        window_size = (window_size[0], 0)
    batch_size, nheads, seqlen_q, seqlen_k = dropout_mask.shape
    dropped = ~dropout_mask
    valid = torch.ones_like(dropout_mask)
    if query_padding_mask is not None:
        dropped.masked_fill_(rearrange(~query_padding_mask, "b s -> b 1 s 1"), False)
        valid.masked_fill_(rearrange(~query_padding_mask, "b s -> b 1 s 1"), False)
    if key_padding_mask is not None:
        dropped.masked_fill_(rearrange(~key_padding_mask, "b s -> b 1 1 s"), False)
        valid.masked_fill_(rearrange(~key_padding_mask, "b s -> b 1 1 s"), False)
    if window_size[0] >= 0 or window_size[1] >= 0:
        local_mask = construct_local_mask(
            seqlen_q,
            seqlen_k,
            window_size,
            query_padding_mask,
            key_padding_mask,
            dropout_mask.device,
        )
        dropped.masked_fill_(local_mask, False)
        valid.masked_fill_(local_mask, False)
    dropped_total = dropped.sum()
    return dropped.sum() / valid.sum()
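

# Illustrative sketch (added for exposition): on a purely random keep/drop mask with no
# padding or local masking, the measured dropout fraction should be close to p. The
# `_example_*` helper and its sizes are assumptions for illustration only.
def _example_dropout_fraction(device="cuda", p=0.17):
    dropout_mask = torch.rand(4, 8, 128, 128, device=device) >= p  # True means keep
    fraction = get_dropout_fraction(dropout_mask).item()
    return fraction  # expected to be approximately p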


@pytest.mark.parametrize("dtype", ([torch.float16] if is_sm75 else [torch.float16, torch.bfloat16]))
# @pytest.mark.parametrize("dtype", [torch.float16])
@pytest.mark.parametrize("deterministic", [False, True])
# @pytest.mark.parametrize("deterministic", [False])
@pytest.mark.parametrize("alibi", [False, True])
# @pytest.mark.parametrize("alibi", [False])
@pytest.mark.parametrize("local", [False, True])
# @pytest.mark.parametrize("local", [False])
@pytest.mark.parametrize("causal", [False, True])
# @pytest.mark.parametrize("causal", [False])
@pytest.mark.parametrize("d", [32, 40, 59, 64, 80, 96, 111, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize("d", [32, 64, 96, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize('d', [32, 64, 96, 128])
# @pytest.mark.parametrize("d", [64])
# @pytest.mark.parametrize('seqlen', [128, 256, 384, 512, 768, 1024, 2048])
@pytest.mark.parametrize("seqlen", [97, 128, 200, 384, 768, 1024, 1025, 2048])
# @pytest.mark.parametrize("seqlen", [512])
@pytest.mark.parametrize("dropout_p", [0.0, 0.17])
# @pytest.mark.parametrize("dropout_p", [0.0])
def test_flash_attn_qkvpacked(seqlen, d, dropout_p, causal, local, alibi, deterministic, dtype):
    if seqlen >= 2048 and torch.cuda.get_device_properties("cuda").total_memory <= 16 * 2**30:
        pytest.skip()  # Reference implementation OOM
    device = "cuda"
    # set seed
    torch.random.manual_seed(0)
    batch_size = 4
    nheads = 9
    window_size = (-1, -1) if not local else torch.randint(0, seqlen, (2,))
    qkv = torch.randn(
        batch_size, seqlen, 3, nheads, d, device=device, dtype=dtype, requires_grad=True
    )
    if alibi:
        alibi_slopes = torch.rand(batch_size, nheads, device=device, dtype=torch.float32) * 0.3
        attn_bias = attn_bias_from_alibi_slopes(alibi_slopes, seqlen, seqlen, causal=causal)
    else:
        alibi_slopes, attn_bias = None, None
    out, lse, S_dmask = flash_attn_qkvpacked_func(
        qkv,
        dropout_p,
        causal=causal,
        window_size=window_size,
        alibi_slopes=alibi_slopes,
        deterministic=deterministic,
        return_attn_probs=True,
    )
    if dropout_p > 0.0:
        S_dmask_converted = convert_flash_attn_S_to_softmax(
            S_dmask,
            seqlen,
            seqlen,
            None,
            None,
            d,
            dropout_p > 0.0,
            causal=causal,
            window_size=window_size,
        )
        dropout_mask = S_dmask_converted >= 0
        attn_unnorm = S_dmask_converted.abs()
        attn = normalize_flash_attn_S(
            attn_unnorm,
            qkv[:, :, 0],
            qkv[:, :, 1],
            qkv[:, :, 2],
            None,
            None,
            attn_bias,
            dropout_p > 0.0,
            causal=causal,
            window_size=window_size,
        )
        dropout_fraction = get_dropout_fraction(
            dropout_mask, None, None, causal=causal, window_size=window_size
        ).item()
        print(f"Actual dropout fraction: {dropout_fraction}")
    else:
        dropout_mask = None

    out_ref, attn_ref = attention_qkvpacked_ref(
        qkv, None, attn_bias, dropout_p, dropout_mask, causal=causal, window_size=window_size
    )
    out_pt, attn_pt = attention_qkvpacked_ref(
        qkv,
        None,
        attn_bias,
        dropout_p,
        dropout_mask,
        causal=causal,
        window_size=window_size,
        upcast=False,
        reorder_ops=True,
    )
    # v = qkv[:, :, 2].float()
    # qk = torch.einsum('bshd,bthd->bhst', qkv[:, :, 0], qkv[:, :, 1]).float()
    # if causal:
    #     causal_mask = torch.triu(torch.ones(seqlen, seqlen, dtype=torch.bool, device=qkv.device), 1)
    #     qk.masked_fill_(causal_mask, float('-inf'))
    # m = qk.amax(-1, keepdim=True)
    # s_tmp = torch.exp((qk - m) / math.sqrt(d))
    # p_tmp = torch.softmax(qk / math.sqrt(d), -1)
    # p_dropped = p_tmp if dropout_mask is None else p_tmp.masked_fill(~dropout_mask, 0)
    # lse_ref = torch.logsumexp(qk / math.sqrt(d), -1)
    # qk_max1 = torch.max(qk[:, :, 128:, 192:], -1, keepdim=True).values
    # qk_max2 = torch.max(qk[:, :, 128:, 128:], -1, keepdim=True).values
    # qk_max3 = torch.max(qk[:, :, 128:, 64:], -1, keepdim=True).values
    # qk_max4 = torch.max(qk[:, :, 128:, :], -1, keepdim=True).values
    # o1 = torch.einsum('bhst,bthd->bshd', torch.exp((qk[:, :, 128:, 192:] - qk_max1) / math.sqrt(d)), v[:, 192:])
    # o2 = torch.einsum('bhst,bthd->bshd', torch.exp((qk[:, :, 128:, 128:] - qk_max2) / math.sqrt(d)), v[:, 128:])
    # o3 = torch.einsum('bhst,bthd->bshd', torch.exp((qk[:, :, 128:, 64:] - qk_max3) / math.sqrt(d)), v[:, 64:])
    # o4 = torch.einsum('bhst,bthd->bshd', torch.exp((qk[:, :, 128:, :] - qk_max4) / math.sqrt(d)), v[:, :])
    print(f"Output max diff: {(out - out_ref).abs().max().item()}")
    print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
    print(f"Pytorch max diff: {(out_pt - out_ref).abs().max().item()}")
    print(f"Pytorch mean diff: {(out_pt - out_ref).abs().mean().item()}")
    if dropout_p > 0.0:
        print(f"Attention max diff: {(attn - attn_ref).abs().max().item()}")
        print(f"Attention Pytorch max diff: {(attn_pt - attn_ref).abs().max().item()}")

    g = torch.randn_like(out)
    # do_o = (g.float() * out.float()).sum(-1)
    # dv_tmp = torch.einsum('bhts,bthd->bshd', attn_pt[:, :, :64], g[:, :64])
    # dv_tmp1 = torch.einsum('bhts,bthd->bshd', attn_pt[:, :, 64:], g[:, 64:])
    if (d <= MAX_HEADDIM_SM8x or dropout_p == 0) or (is_sm80 or is_sm90):
        (dqkv,) = torch.autograd.grad(out, qkv, g)
        (dqkv_ref,) = torch.autograd.grad(out_ref, qkv, g)
        (dqkv_pt,) = torch.autograd.grad(out_pt, qkv, g)
        print(f"dQ max diff: {(dqkv[:, :, 0] - dqkv_ref[:, :, 0]).abs().max().item()}")
        print(f"dK max diff: {(dqkv[:, :, 1] - dqkv_ref[:, :, 1]).abs().max().item()}")
        print(f"dV max diff: {(dqkv[:, :, 2] - dqkv_ref[:, :, 2]).abs().max().item()}")
        print(f"dQKV mean diff: {(dqkv - dqkv_ref).abs().mean().item()}")
        print(f"dQ Pytorch max diff: {(dqkv_pt[:, :, 0] - dqkv_ref[:, :, 0]).abs().max().item()}")
        print(f"dK Pytorch max diff: {(dqkv_pt[:, :, 1] - dqkv_ref[:, :, 1]).abs().max().item()}")
        print(f"dV Pytorch max diff: {(dqkv_pt[:, :, 2] - dqkv_ref[:, :, 2]).abs().max().item()}")
        print(f"dQKV Pytorch mean diff: {(dqkv_pt - dqkv_ref).abs().mean().item()}")

    # Check that FlashAttention's numerical error is at most twice the numerical error
    # of a Pytorch implementation.
    assert (out - out_ref).abs().max().item() <= 2 * (out_pt - out_ref).abs().max().item()

    if dropout_p > 0.0:
        assert (attn - attn_ref).abs().max().item() <= 2 * (attn_pt - attn_ref).abs().max().item()
        # With alibi, many of the prob values are 0.0 & -0.0 so dropout_fraction isn't accurate
        if not alibi:
            assert abs(dropout_fraction - dropout_p) <= (0.01 if not local else 0.025)

    if (d <= MAX_HEADDIM_SM8x or dropout_p == 0) or (is_sm80 or is_sm90):
        assert (dqkv - dqkv_ref).abs().max().item() <= 2 * (dqkv_pt - dqkv_ref).abs().max().item()
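

# Sketch of how one might invoke just the test above (added for exposition; adjust the
# filter expression to taste):
#   pytest test_flash_attn.py -k "test_flash_attn_qkvpacked" -x -q
# `-k` selects tests by name substring, `-x` stops at the first failure, and `-q` keeps the
# output terse. The parametrize decorators expand the test over dtype, determinism, alibi,
# local/causal masking, head dimension, sequence length, and dropout probability.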


@pytest.mark.parametrize("dtype", ([torch.float16] if is_sm75 else [torch.float16, torch.bfloat16]))
# @pytest.mark.parametrize('dtype', [torch.float16])
@pytest.mark.parametrize("deterministic", [False, True])
# @pytest.mark.parametrize("deterministic", [True])
@pytest.mark.parametrize("alibi", [False, True])
# @pytest.mark.parametrize("alibi", [True])
@pytest.mark.parametrize("local", [False, True])
# @pytest.mark.parametrize("local", [True])
@pytest.mark.parametrize("causal", [False, True])
# @pytest.mark.parametrize('causal', [False])
@pytest.mark.parametrize("d", [32, 59, 64, 80, 96, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize("d", [32, 64, 96, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize('d', [64])
@pytest.mark.parametrize("seqlen", [97, 128, 200, 257, 384, 512, 768, 1025, 2048])
# @pytest.mark.parametrize('seqlen', [128])
@pytest.mark.parametrize("dropout_p", [0.0, 0.17])
# @pytest.mark.parametrize('dropout_p', [0.0])
def test_flash_attn_varlen_qkvpacked(
    seqlen, d, dropout_p, causal, local, alibi, deterministic, dtype
):
    if seqlen >= 2048 and torch.cuda.get_device_properties("cuda").total_memory <= 16 * 2**30:
        pytest.skip()  # Reference implementation OOM
    device = "cuda"
    # set seed
    torch.random.manual_seed(0)
    batch_size = 5
    nheads = 6
    window_size = (-1, -1) if not local else torch.randint(0, seqlen, (2,))
    qkv = torch.randn(
        batch_size, seqlen, 3, nheads, d, device=device, dtype=dtype, requires_grad=True
    )
    key_padding_mask = generate_random_padding_mask(seqlen, batch_size, device, mode="random")
    # key_padding_mask = generate_random_padding_mask(seqlen, batch_size, device, mode='full')
    if alibi:
        alibi_slopes = torch.rand(batch_size, nheads, device=device, dtype=torch.float32) * 0.3
        attn_bias = attn_bias_from_alibi_slopes(
            alibi_slopes, seqlen, seqlen, key_padding_mask, key_padding_mask, causal=causal
        )
    else:
        alibi_slopes, attn_bias = None, None

    qkv_unpad, cu_seqlens, max_seqlen, qkv, output_pad_fn, dqkv_pad_fn = generate_qkv(
        *qkv.unbind(dim=2), key_padding_mask, key_padding_mask, qkvpacked=True
    )
    out_unpad, sm_lse, S_dmask = flash_attn_varlen_qkvpacked_func(
        qkv_unpad,
        cu_seqlens,
        max_seqlen,
        dropout_p,
        causal=causal,
        window_size=window_size,
        alibi_slopes=alibi_slopes,
        deterministic=deterministic,
        return_attn_probs=True,
    )
    out = output_pad_fn(out_unpad)
    if dropout_p > 0.0:
        S_dmask_converted = convert_flash_attn_S_to_softmax(
            S_dmask,
            seqlen,
            seqlen,
            key_padding_mask,
            key_padding_mask,
            d,
            dropout_p > 0.0,
            causal=causal,
            window_size=window_size,
        )
        dropout_mask = S_dmask_converted >= 0
        attn_unnorm = S_dmask_converted.abs()
        attn = normalize_flash_attn_S(
            attn_unnorm,
            qkv[:, :, 0],
            qkv[:, :, 1],
            qkv[:, :, 2],
            key_padding_mask,
            key_padding_mask,
            attn_bias,
            dropout_p > 0.0,
            causal=causal,
            window_size=window_size,
        )
        dropout_fraction = get_dropout_fraction(
            dropout_mask, key_padding_mask, key_padding_mask, causal=causal, window_size=window_size
        ).item()
        print(f"Actual dropout fraction: {dropout_fraction}")
    else:
        dropout_mask = None

    out_ref, attn_ref = attention_qkvpacked_ref(
        qkv,
        key_padding_mask,
        attn_bias,
        dropout_p,
        dropout_mask,
        causal=causal,
        window_size=window_size,
    )
    out_pt, attn_pt = attention_qkvpacked_ref(
        qkv,
        key_padding_mask,
        attn_bias,
        dropout_p,
        dropout_mask,
        causal=causal,
        window_size=window_size,
        upcast=False,
        reorder_ops=True,
    )
    print(f"Output max diff: {(out - out_ref).abs().max().item()}")
    print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
    print(f"Pytorch max diff: {(out_pt - out_ref).abs().max().item()}")
    print(f"Pytorch mean diff: {(out_pt - out_ref).abs().mean().item()}")
    if dropout_p > 0.0:
        print(f"Attention max diff: {(attn - attn_ref).abs().max().item()}")
        print(f"Attention Pytorch max diff: {(attn_pt - attn_ref).abs().max().item()}")

    g = torch.randn_like(out)
    if (d <= MAX_HEADDIM_SM8x or dropout_p == 0) or (is_sm80 or is_sm90):
        (dqkv_unpad,) = torch.autograd.grad(out, qkv_unpad, g)
        dqkv = dqkv_pad_fn(dqkv_unpad)
        (dqkv_ref,) = torch.autograd.grad(out_ref, qkv, g)
        (dqkv_pt,) = torch.autograd.grad(out_pt, qkv, g)
        print(f"dQ max diff: {(dqkv[:, :, 0] - dqkv_ref[:, :, 0]).abs().max().item()}")
        print(f"dK max diff: {(dqkv[:, :, 1] - dqkv_ref[:, :, 1]).abs().max().item()}")
        print(f"dV max diff: {(dqkv[:, :, 2] - dqkv_ref[:, :, 2]).abs().max().item()}")
        print(f"dQKV mean diff: {(dqkv - dqkv_ref).abs().mean().item()}")
        print(f"dQ Pytorch max diff: {(dqkv_pt[:, :, 0] - dqkv_ref[:, :, 0]).abs().max().item()}")
        print(f"dK Pytorch max diff: {(dqkv_pt[:, :, 1] - dqkv_ref[:, :, 1]).abs().max().item()}")
        print(f"dV Pytorch max diff: {(dqkv_pt[:, :, 2] - dqkv_ref[:, :, 2]).abs().max().item()}")
        print(f"dQKV Pytorch mean diff: {(dqkv_pt - dqkv_ref).abs().mean().item()}")

    # Check that FlashAttention's numerical error is at most twice the numerical error
    # of a Pytorch implementation.
    assert (out - out_ref).abs().max().item() <= 2 * (out_pt - out_ref).abs().max().item()

    if dropout_p > 0.0:
        assert (attn - attn_ref).abs().max().item() <= 2 * (attn_pt - attn_ref).abs().max().item()
        # With alibi, many of the prob values are 0.0 & -0.0 so dropout_fraction isn't accurate
        if not alibi:
            assert abs(dropout_fraction - dropout_p) <= (0.01 if not local else 0.025)

    if (d <= MAX_HEADDIM_SM8x or dropout_p == 0) or (is_sm80 or is_sm90):
        assert (dqkv - dqkv_ref).abs().max().item() <= 2 * (dqkv_pt - dqkv_ref).abs().max().item()


@pytest.mark.parametrize("kvpacked", [True, False])
# @pytest.mark.parametrize("kvpacked", [False])
@pytest.mark.parametrize("dtype", ([torch.float16] if is_sm75 else [torch.float16, torch.bfloat16]))
# @pytest.mark.parametrize("dtype", [torch.bfloat16])
@pytest.mark.parametrize("mha_type", ["mha", "mqa", "gqa"])
# @pytest.mark.parametrize("mha_type", ["mha"])
@pytest.mark.parametrize("deterministic", [False, True])
# @pytest.mark.parametrize("deterministic", [True])
@pytest.mark.parametrize("alibi", [False, True])
# @pytest.mark.parametrize("alibi", [False])
@pytest.mark.parametrize("local", [False, True])
# @pytest.mark.parametrize("local", [False])
@pytest.mark.parametrize("causal", [False, True])
# @pytest.mark.parametrize("causal", [True])
@pytest.mark.parametrize("d", [32, 40, 59, 64, 96, 111, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize("d", [32, 64, 96, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize('d', [32, 40, 64, 80, 96, 128, 160, 192])
# @pytest.mark.parametrize('d', [32, 64, 96, 128, 160, 192])
# @pytest.mark.parametrize('d', [56, 80])
# @pytest.mark.parametrize("d", [64])
@pytest.mark.parametrize(
    "seqlen_q,seqlen_k",
    [
        (113, 203),
        (128, 217),
        (113, 211),
        (108, 256),
        (256, 512),
        (512, 256),
        (1024, 1024),
        (1023, 1024),
        (1024, 1023),
        (2048, 2048),
    ],
)
# @pytest.mark.parametrize('seqlen_q,seqlen_k', [(256, 128)])
@pytest.mark.parametrize("dropout_p", [0.0, 0.17])
# @pytest.mark.parametrize("dropout_p", [0.0])
@pytest.mark.parametrize("softcap", [0.0, 50.0])
def test_flash_attn_output(
    seqlen_q, seqlen_k, d, dropout_p, causal, local, alibi, deterministic, mha_type, dtype, kvpacked, softcap
):
    if (
        max(seqlen_q, seqlen_k) >= 2048
        and torch.cuda.get_device_properties("cuda").total_memory <= 16 * 2**30
    ):
        pytest.skip()  # Reference implementation OOM
    if softcap > 0.0 and dropout_p > 0.0:
        pytest.skip("Softcap and dropout not supported together")
    device = "cuda"
    # set seed
    torch.random.manual_seed(0)
    batch_size = 4
    nheads = 6 if softcap == 0.0 else 4  # softcap reference impl takes more memory
    nheads_k = nheads if mha_type == "mha" else (1 if mha_type == "mqa" else 2)
    assert nheads % nheads_k == 0
    window_size = (-1, -1) if not local else torch.randint(0, seqlen_k, (2,))
    q = torch.randn(batch_size, seqlen_q, nheads, d, device=device, dtype=dtype, requires_grad=True)
    if softcap > 0:
        # Ensure the values of qk are at least within softcap range.
        q = q * softcap
    if kvpacked:
        kv = torch.randn(
            batch_size, seqlen_k, 2, nheads_k, d, device=device, dtype=dtype, requires_grad=True
        )
    else:
        k = torch.randn(
            batch_size, seqlen_k, nheads_k, d, device=device, dtype=dtype, requires_grad=True
        )
        v = torch.randn(
            batch_size, seqlen_k, nheads_k, d, device=device, dtype=dtype, requires_grad=True
        )
    if alibi:
        alibi_slopes = torch.rand(batch_size, nheads, device=device, dtype=torch.float32) * 0.3
        attn_bias = attn_bias_from_alibi_slopes(alibi_slopes, seqlen_q, seqlen_k, causal=causal)
    else:
        alibi_slopes, attn_bias = None, None

    if kvpacked:
        out, lse, S_dmask = flash_attn_kvpacked_func(
            q,
            kv,
            dropout_p,
            causal=causal,
            window_size=window_size,
            softcap=softcap,
            alibi_slopes=alibi_slopes,
            deterministic=deterministic,
            return_attn_probs=True,
        )
    else:
        out, lse, S_dmask = flash_attn_func(
            q,
            k,
            v,
            dropout_p,
            causal=causal,
            window_size=window_size,
            softcap=softcap,
            alibi_slopes=alibi_slopes,
            deterministic=deterministic,
            return_attn_probs=True,
        )
    if dropout_p > 0.0:
        S_dmask_converted = convert_flash_attn_S_to_softmax(
            S_dmask,
            seqlen_q,
            seqlen_k,
            None,
            None,
            d,
            dropout_p > 0.0,
            causal=causal,
            window_size=window_size,
        )
        dropout_mask = S_dmask_converted >= 0
        attn_unnorm = S_dmask_converted.abs()
        if kvpacked:
            kv_rep = repeat(kv, "b s two h d -> b s two (h g) d", g=nheads // nheads_k)
            k_rep, v_rep = kv_rep.unbind(dim=2)
        else:
            k_rep = repeat(k, "b s h d -> b s (h g) d", g=nheads // nheads_k)
            v_rep = repeat(v, "b s h d -> b s (h g) d", g=nheads // nheads_k)
        attn = normalize_flash_attn_S(
            attn_unnorm,
            q,
            k_rep,
            v_rep,
            None,
            None,
            attn_bias,
            dropout_p > 0.0,
            causal=causal,
            window_size=window_size,
        )
        dropout_fraction = get_dropout_fraction(
            dropout_mask, None, None, causal=causal, window_size=window_size
        ).item()
        print(f"Actual dropout fraction: {dropout_fraction}")
    else:
        dropout_mask = None

    if kvpacked:
        out_ref, attn_ref = attention_kvpacked_ref(
            q,
            kv,
            None,
            None,
            attn_bias,
            dropout_p,
            dropout_mask,
            causal=causal,
            window_size=window_size,
            softcap=softcap,
        )
        out_pt, attn_pt = attention_kvpacked_ref(
            q,
            kv,
            None,
            None,
            attn_bias,
            dropout_p,
            dropout_mask,
            causal=causal,
            window_size=window_size,
            softcap=softcap,
            upcast=False,
            reorder_ops=True,
        )
    else:
        out_ref, attn_ref = attention_ref(
            q,
            k,
            v,
            None,
            None,
            attn_bias,
            dropout_p,
            dropout_mask,
            causal=causal,
            window_size=window_size,
            softcap=softcap,
        )
        out_pt, attn_pt = attention_ref(
            q,
            k,
            v,
            None,
            None,
            attn_bias,
            dropout_p,
            dropout_mask,
            causal=causal,
            window_size=window_size,
            softcap=softcap,
            upcast=False,
            reorder_ops=True,
        )

    print(f"Output max diff: {(out - out_ref).abs().max().item()}")
    print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
    print(f"Pytorch max diff: {(out_pt - out_ref).abs().max().item()}")
    print(f"Pytorch mean diff: {(out_pt - out_ref).abs().mean().item()}")
    if dropout_p > 0.0:
        print(f"Attention max diff: {(attn - attn_ref).abs().max().item()}")
        print(f"Attention Pytorch max diff: {(attn_pt - attn_ref).abs().max().item()}")

    g = torch.randn_like(out)
    do_o = (g.float() * out.float()).sum(-1)
    if (d <= MAX_HEADDIM_SM8x or dropout_p == 0) or (is_sm80 or is_sm90):
        if kvpacked:
            (
                dq,
                dkv,
            ) = torch.autograd.grad(out, (q, kv), g)
            dk, dv = dkv.unbind(2)
            (
                dq_ref,
                dkv_ref,
            ) = torch.autograd.grad(out_ref, (q, kv), g)
            dk_ref, dv_ref = dkv_ref.unbind(2)
            (
                dq_pt,
                dkv_pt,
            ) = torch.autograd.grad(out_pt, (q, kv), g)
            dk_pt, dv_pt = dkv_pt.unbind(2)
        else:
            (
                dq,
                dk,
                dv,
            ) = torch.autograd.grad(out, (q, k, v), g)
            (
                dq_ref,
                dk_ref,
                dv_ref,
            ) = torch.autograd.grad(out_ref, (q, k, v), g)
            (
                dq_pt,
                dk_pt,
                dv_pt,
            ) = torch.autograd.grad(out_pt, (q, k, v), g)
        print(f"dQ max diff: {(dq - dq_ref).abs().max().item()}")
        print(f"dK max diff: {(dk - dk_ref).abs().max().item()}")
        print(f"dV max diff: {(dv - dv_ref).abs().max().item()}")
        print(f"dQ mean diff: {(dq - dq_ref).abs().mean().item()}")
        print(f"dK mean diff: {(dk - dk_ref).abs().mean().item()}")
        print(f"dV mean diff: {(dv - dv_ref).abs().mean().item()}")
        print(f"dQ Pytorch max diff: {(dq_pt - dq_ref).abs().max().item()}")
        print(f"dK Pytorch max diff: {(dk_pt - dk_ref).abs().max().item()}")
        print(f"dV Pytorch max diff: {(dv_pt - dv_ref).abs().max().item()}")
        print(f"dQ Pytorch mean diff: {(dq_pt - dq_ref).abs().mean().item()}")
        print(f"dK Pytorch mean diff: {(dk_pt - dk_ref).abs().mean().item()}")
        print(f"dV Pytorch mean diff: {(dv_pt - dv_ref).abs().mean().item()}")

    # Check that FlashAttention's numerical error is at most twice the numerical error
    # of a Pytorch implementation.
    assert (out - out_ref).abs().max().item() <= 2 * (out_pt - out_ref).abs().max().item()

    if dropout_p > 0.0:
        assert (attn - attn_ref).abs().max().item() <= 2 * (attn_pt - attn_ref).abs().max().item()
        # With alibi, many of the prob values are 0.0 & -0.0 so dropout_fraction isn't accurate
        if not alibi:
            assert abs(dropout_fraction - dropout_p) <= (0.01 if not local else 0.025)

    if (d <= MAX_HEADDIM_SM8x or dropout_p == 0) or (is_sm80 or is_sm90):
        assert (dq - dq_ref).abs().max().item() <= 3 * (dq_pt - dq_ref).abs().max().item()
        assert (dk - dk_ref).abs().max().item() <= 3 * (dk_pt - dk_ref).abs().max().item()
        assert (dv - dv_ref).abs().max().item() <= 3 * (dv_pt - dv_ref).abs().max().item()
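

# Note (added for exposition): the structure above is the template the remaining tests follow:
# run the kernel, rebuild the dropout mask from the returned S matrix when dropout is on,
# compare against an fp32 reference (attention_ref) and a low-precision reference with
# reordered operations (upcast=False, reorder_ops=True), and require the kernel's error to
# stay within a small multiple (2x for outputs, 3x for dq/dk/dv here) of the low-precision
# reference's error.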
@pytest.mark.parametrize("kvpacked", [True, False])
# @pytest.mark.parametrize('kvpacked', [False])
@pytest.mark.parametrize("dtype", ([torch.float16] if is_sm75 else [torch.float16, torch.bfloat16]))
# @pytest.mark.parametrize('dtype', [torch.float16])
@pytest.mark.parametrize("mha_type", ["mha", "mqa", "gqa"])
# @pytest.mark.parametrize('mha_type', ["mqa"])
@pytest.mark.parametrize("deterministic", [False, True])
# @pytest.mark.parametrize("deterministic", [True])
@pytest.mark.parametrize("alibi", [False, True])
# @pytest.mark.parametrize("alibi", [True])
@pytest.mark.parametrize("local", [False, True])
# @pytest.mark.parametrize("local", [True])
@pytest.mark.parametrize("causal", [False, True])
# @pytest.mark.parametrize('causal', [True])
@pytest.mark.parametrize("d", [32, 59, 64, 80, 96, 111, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize("d", [32, 64, 96, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize('d', [64])
@pytest.mark.parametrize(
    "seqlen_q,seqlen_k",
    [
        (1, 147),
        (113, 203),
        (128, 217),
        (113, 211),
        (108, 256),
        (256, 512),
        (512, 256),
        (1024, 1024),
        (1023, 1024),
        (1024, 1023),
        (2048, 2048),
    ],
)
# @pytest.mark.parametrize('seqlen_q,seqlen_k', [(128, 128)])
@pytest.mark.parametrize("dropout_p", [0.0, 0.17])
@pytest.mark.parametrize("softcap", [0.0, 50.0])
# @pytest.mark.parametrize('dropout_p', [0.0])
def test_flash_attn_varlen_output(
    seqlen_q, seqlen_k, d, dropout_p, causal, local, alibi, deterministic, mha_type, dtype, kvpacked, softcap
):
    if (
        max(seqlen_q, seqlen_k) >= 2048
        and torch.cuda.get_device_properties("cuda").total_memory <= 16 * 2**30
    ):
        pytest.skip()  # Reference implementation OOM
    if softcap > 0.0 and dropout_p > 0.0:
        pytest.skip("Softcap and dropout not supported together")
    device = "cuda"
    # set seed
    torch.random.manual_seed(0)
    batch_size = 4
    nheads = 6 if softcap == 0.0 else 4  # softcap reference impl takes more memory
    nheads_k = nheads if mha_type == "mha" else (1 if mha_type == "mqa" else 2)
    assert nheads % nheads_k == 0
    window_size = (-1, -1) if not local else torch.randint(0, seqlen_k, (2,))
    q = torch.randn(batch_size, seqlen_q, nheads, d, device=device, dtype=dtype, requires_grad=True)
    if softcap > 0:
        # Ensure the values of qk are at least within softcap range.
        q = q * softcap
    if kvpacked:
        kv = torch.randn(
            batch_size, seqlen_k, 2, nheads_k, d, device=device, dtype=dtype, requires_grad=True
        )
    else:
        k = torch.randn(
            batch_size, seqlen_k, nheads_k, d, device=device, dtype=dtype, requires_grad=True
        )
        v = torch.randn(
            batch_size, seqlen_k, nheads_k, d, device=device, dtype=dtype, requires_grad=True
        )
    query_padding_mask = generate_random_padding_mask(seqlen_q, batch_size, device, mode="random")
    key_padding_mask = generate_random_padding_mask(seqlen_k, batch_size, device, mode="random")
    # key_padding_mask = generate_random_padding_mask(seqlen_k, batch_size, device, mode='full')
    if alibi:
        alibi_slopes = torch.rand(batch_size, nheads, device=device, dtype=torch.float32) * 0.3
        attn_bias = attn_bias_from_alibi_slopes(
            alibi_slopes, seqlen_q, seqlen_k, query_padding_mask, key_padding_mask, causal=causal
        )
    else:
        alibi_slopes, attn_bias = None, None
    if kvpacked:
        (
            q_unpad,
            kv_unpad,
            cu_seqlens_q,
            cu_seqlens_k,
            max_seqlen_q,
            max_seqlen_k,
            q,
            kv,
            output_pad_fn,
            dq_pad_fn,
            dkv_pad_fn,
        ) = generate_qkv(q, *kv.unbind(dim=2), query_padding_mask, key_padding_mask, kvpacked=True)
        out_unpad, sm_lse, S_dmask = flash_attn_varlen_kvpacked_func(
            q_unpad,
            kv_unpad,
            cu_seqlens_q,
            cu_seqlens_k,
            max_seqlen_q,
            max_seqlen_k,
            dropout_p,
            causal=causal,
            window_size=window_size,
            softcap=softcap,
            alibi_slopes=alibi_slopes,
            deterministic=deterministic,
            return_attn_probs=True,
        )
    else:
        (
            q_unpad,
            k_unpad,
            v_unpad,
            cu_seqlens_q,
            cu_seqlens_k,
            max_seqlen_q,
            max_seqlen_k,
            q,
            k,
            v,
            output_pad_fn,
            dq_pad_fn,
            dk_pad_fn,
        ) = generate_qkv(q, k, v, query_padding_mask, key_padding_mask, kvpacked=False)
        out_unpad, sm_lse, S_dmask = flash_attn_varlen_func(
            q_unpad,
            k_unpad,
            v_unpad,
            cu_seqlens_q,
            cu_seqlens_k,
            max_seqlen_q,
            max_seqlen_k,
            dropout_p,
            causal=causal,
            window_size=window_size,
            softcap=softcap,
            alibi_slopes=alibi_slopes,
            deterministic=deterministic,
            return_attn_probs=True,
        )
    out = output_pad_fn(out_unpad)
    if dropout_p > 0.0:
        S_dmask_converted = convert_flash_attn_S_to_softmax(
            S_dmask,
            seqlen_q,
            seqlen_k,
            query_padding_mask,
            key_padding_mask,
            d,
            dropout_p > 0.0,
            causal=causal,
            window_size=window_size,
        )
        dropout_mask = S_dmask_converted >= 0
        attn_unnorm = S_dmask_converted.abs()
        if kvpacked:
            kv_rep = repeat(kv, "b s two h d -> b s two (h g) d", g=nheads // nheads_k)
            k_rep, v_rep = kv_rep.unbind(dim=2)
        else:
            k_rep = repeat(k, "b s h d -> b s (h g) d", g=nheads // nheads_k)
            v_rep = repeat(v, "b s h d -> b s (h g) d", g=nheads // nheads_k)
        attn = normalize_flash_attn_S(
            attn_unnorm,
            q,
            k_rep,
            v_rep,
            query_padding_mask,
            key_padding_mask,
            attn_bias,
            dropout_p > 0.0,
            causal=causal,
            window_size=window_size,
        )
        dropout_fraction = get_dropout_fraction(
            dropout_mask,
            query_padding_mask,
            key_padding_mask,
            causal=causal,
            window_size=window_size,
        ).item()
        print(f"Actual dropout fraction: {dropout_fraction}")
    else:
        dropout_mask = None
    if kvpacked:
        out_ref, attn_ref = attention_kvpacked_ref(
            q,
            kv,
            query_padding_mask,
            key_padding_mask,
            attn_bias,
            dropout_p,
            dropout_mask,
            causal=causal,
            window_size=window_size,
            softcap=softcap,
        )
        out_pt, attn_pt = attention_kvpacked_ref(
            q,
            kv,
            query_padding_mask,
            key_padding_mask,
            attn_bias,
            dropout_p,
            dropout_mask,
            causal=causal,
            window_size=window_size,
            softcap=softcap,
            upcast=False,
            reorder_ops=True,
        )
    else:
        out_ref, attn_ref = attention_ref(
            q,
            k,
            v,
            query_padding_mask,
            key_padding_mask,
            attn_bias,
            dropout_p,
            dropout_mask,
            causal=causal,
            window_size=window_size,
            softcap=softcap,
        )
        out_pt, attn_pt = attention_ref(
            q,
            k,
            v,
            query_padding_mask,
            key_padding_mask,
            attn_bias,
            dropout_p,
            dropout_mask,
            causal=causal,
            window_size=window_size,
            softcap=softcap,
            upcast=False,
            reorder_ops=True,
        )
    print(f"Output max diff: {(out - out_ref).abs().max().item()}")
    print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
    print(f"Pytorch max diff: {(out_pt - out_ref).abs().max().item()}")
    print(f"Pytorch mean diff: {(out_pt - out_ref).abs().mean().item()}")
    if dropout_p > 0.0:
        print(f"Attention max diff: {(attn - attn_ref).abs().max().item()}")
        print(f"Attention Pytorch max diff: {(attn_pt - attn_ref).abs().max().item()}")
    g = torch.randn_like(out)
    if (d <= MAX_HEADDIM_SM8x or dropout_p == 0) or (is_sm80 or is_sm90):
        if kvpacked:
            (
                dq_unpad,
                dkv_unpad,
            ) = torch.autograd.grad(out, (q_unpad, kv_unpad), g)
            dk, dv = dkv_pad_fn(dkv_unpad).unbind(2)
            (
                dq_ref,
                dkv_ref,
            ) = torch.autograd.grad(out_ref, (q, kv), g)
            dk_ref, dv_ref = dkv_ref.unbind(2)
            (
                dq_pt,
                dkv_pt,
            ) = torch.autograd.grad(out_pt, (q, kv), g)
            dk_pt, dv_pt = dkv_pt.unbind(2)
        else:
            (
                dq_unpad,
                dk_unpad,
                dv_unpad,
            ) = torch.autograd.grad(out, (q_unpad, k_unpad, v_unpad), g)
            dk = dk_pad_fn(dk_unpad)
            dv = dk_pad_fn(dv_unpad)
            (
                dq_ref,
                dk_ref,
                dv_ref,
            ) = torch.autograd.grad(out_ref, (q, k, v), g)
            (
                dq_pt,
                dk_pt,
                dv_pt,
            ) = torch.autograd.grad(out_pt, (q, k, v), g)
        dq = dq_pad_fn(dq_unpad)
        print(f"dQ max diff: {(dq - dq_ref).abs().max().item()}")
        print(f"dK max diff: {(dk - dk_ref).abs().max().item()}")
        print(f"dV max diff: {(dv - dv_ref).abs().max().item()}")
        print(f"dQ mean diff: {(dq - dq_ref).abs().mean().item()}")
        print(f"dK mean diff: {(dk - dk_ref).abs().mean().item()}")
        print(f"dV mean diff: {(dv - dv_ref).abs().mean().item()}")
        print(f"dQ Pytorch max diff: {(dq_pt - dq_ref).abs().max().item()}")
        print(f"dK Pytorch max diff: {(dk_pt - dk_ref).abs().max().item()}")
        print(f"dV Pytorch max diff: {(dv_pt - dv_ref).abs().max().item()}")
        print(f"dQ Pytorch mean diff: {(dq_pt - dq_ref).abs().mean().item()}")
        print(f"dK Pytorch mean diff: {(dk_pt - dk_ref).abs().mean().item()}")
        print(f"dV Pytorch mean diff: {(dv_pt - dv_ref).abs().mean().item()}")
    # Check that FlashAttention's numerical error is at most twice the numerical error
    # of a Pytorch implementation.
    assert (out - out_ref).abs().max().item() <= 2 * (out_pt - out_ref).abs().max().item()
    if dropout_p > 0.0:
        assert (attn - attn_ref).abs().max().item() <= 2 * (attn_pt - attn_ref).abs().max().item()
        # With alibi, many of the prob values are 0.0 & -0.0 so dropout_fraction isn't accurate
        if not alibi:
            assert abs(dropout_fraction - dropout_p) <= (0.01 if not local else 0.04)
    if (d <= MAX_HEADDIM_SM8x or dropout_p == 0) or (is_sm80 or is_sm90):
        assert (dq - dq_ref).abs().max().item() <= 3 * (dq_pt - dq_ref).abs().max().item()
        assert (dk - dk_ref).abs().max().item() <= 3 * (dk_pt - dk_ref).abs().max().item()
        assert (dv - dv_ref).abs().max().item() <= 3 * (dv_pt - dv_ref).abs().max().item()
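

# test_flash_attn_causal: forward + backward check for the dense causal path across head dims,
# local (sliding-window) attention, and swapped seqlen_q/seqlen_k, against a PyTorch reference.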
@pytest.mark.parametrize("dtype", ([torch.float16] if is_sm75 else [torch.float16, torch.bfloat16]))
# @pytest.mark.parametrize("dtype", [torch.bfloat16])
@pytest.mark.parametrize("local", [False, True])
# @pytest.mark.parametrize("local", [True])
@pytest.mark.parametrize("d", [32, 40, 59, 64, 80, 96, 111, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize("d", [32, 64, 96, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize('d', [32, 40, 64, 80, 96, 128, 160, 192])
# @pytest.mark.parametrize('d', [32, 64, 96, 128, 160, 192])
# @pytest.mark.parametrize('d', [56, 80])
# @pytest.mark.parametrize("d", [64, 128])
@pytest.mark.parametrize("swap_sq_sk", [False, True])
# @pytest.mark.parametrize("swap_sq_sk", [True])
@pytest.mark.parametrize(
    "seqlen_q,seqlen_k",
    [
        (1, 239),
        (3, 799),
        (127, 512),
        (127, 513),
        (113, 203),
        (128, 217),
        (113, 211),
        (108, 256),
        (256, 512),
        (1023, 1024),
    ],
)
# @pytest.mark.parametrize('seqlen_q,seqlen_k', [(256, 128)])
def test_flash_attn_causal(seqlen_q, seqlen_k, swap_sq_sk, d, local, dtype):
    if (
        max(seqlen_q, seqlen_k) >= 2048
        and torch.cuda.get_device_properties("cuda").total_memory <= 16 * 2**30
    ):
        pytest.skip()  # Reference implementation OOM
    if swap_sq_sk:
        seqlen_q, seqlen_k = seqlen_k, seqlen_q
    device = "cuda"
    causal = True
    # set seed
    torch.random.manual_seed(0)
    batch_size = 8
    nheads = 9
    window_size = (-1, -1) if not local else torch.randint(0, seqlen_k, (2,))
    q = torch.randn(batch_size, seqlen_q, nheads, d, device=device, dtype=dtype, requires_grad=True)
    k = torch.randn(batch_size, seqlen_k, nheads, d, device=device, dtype=dtype, requires_grad=True)
    v = torch.randn(batch_size, seqlen_k, nheads, d, device=device, dtype=dtype, requires_grad=True)
    out = flash_attn_func(q, k, v, 0.0, causal=causal, window_size=window_size)
    out_ref, attn_ref = attention_ref(
        q, k, v, None, None, None, 0.0, None, causal=causal, window_size=window_size
    )
    out_pt, attn_pt = attention_ref(
        q,
        k,
        v,
        None,
        None,
        None,
        0.0,
        None,
        causal=causal,
        window_size=window_size,
        upcast=False,
        reorder_ops=True,
    )
    print(f"Output max diff: {(out - out_ref).abs().max().item()}")
    print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
    print(f"Pytorch max diff: {(out_pt - out_ref).abs().max().item()}")
    print(f"Pytorch mean diff: {(out_pt - out_ref).abs().mean().item()}")
    g = torch.randn_like(out)
    do_o = (g.float() * out.float()).sum(-1)
    (
        dq,
        dk,
        dv,
    ) = torch.autograd.grad(out, (q, k, v), g)
    (
        dq_ref,
        dk_ref,
        dv_ref,
    ) = torch.autograd.grad(out_ref, (q, k, v), g)
    (
        dq_pt,
        dk_pt,
        dv_pt,
    ) = torch.autograd.grad(out_pt, (q, k, v), g)
    print(f"dQ max diff: {(dq - dq_ref).abs().max().item()}")
    print(f"dK max diff: {(dk - dk_ref).abs().max().item()}")
    print(f"dV max diff: {(dv - dv_ref).abs().max().item()}")
    print(f"dQ mean diff: {(dq - dq_ref).abs().mean().item()}")
    print(f"dK mean diff: {(dk - dk_ref).abs().mean().item()}")
    print(f"dV mean diff: {(dv - dv_ref).abs().mean().item()}")
    print(f"dQ Pytorch max diff: {(dq_pt - dq_ref).abs().max().item()}")
    print(f"dK Pytorch max diff: {(dk_pt - dk_ref).abs().max().item()}")
    print(f"dV Pytorch max diff: {(dv_pt - dv_ref).abs().max().item()}")
    print(f"dQ Pytorch mean diff: {(dq_pt - dq_ref).abs().mean().item()}")
    print(f"dK Pytorch mean diff: {(dk_pt - dk_ref).abs().mean().item()}")
    print(f"dV Pytorch mean diff: {(dv_pt - dv_ref).abs().mean().item()}")
    # Check that FlashAttention's numerical error is at most twice the numerical error
    # of a Pytorch implementation.
    assert (out - out_ref).abs().max().item() <= 2 * (out_pt - out_ref).abs().max().item() + 1e-5
    assert (dq - dq_ref).abs().max().item() <= 2 * (dq_pt - dq_ref).abs().max().item() + 1e-5
    assert (dk - dk_ref).abs().max().item() <= 2 * (dk_pt - dk_ref).abs().max().item() + 1e-5
    assert (dv - dv_ref).abs().max().item() <= 2 * (dv_pt - dv_ref).abs().max().item() + 1e-5
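

# test_flash_attn_varlen_causal: causal varlen path, optionally reading K/V from a paged KV cache
# via block_table; the backward pass is only checked when no paged cache is used.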
@pytest.mark.parametrize("dtype", ([torch.float16] if is_sm75 else [torch.float16, torch.bfloat16]))
# @pytest.mark.parametrize("dtype", [torch.bfloat16])
@pytest.mark.parametrize("local", [False, True])
# @pytest.mark.parametrize("local", [True])
@pytest.mark.parametrize("d", [32, 40, 59, 64, 80, 96, 111, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize("d", [32, 64, 96, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize('d', [32, 40, 64, 80, 96, 128, 160, 192])
# @pytest.mark.parametrize('d', [32, 64, 96, 128, 160, 192])
# @pytest.mark.parametrize('d', [56, 80])
# @pytest.mark.parametrize("d", [64])
@pytest.mark.parametrize("swap_sq_sk", [False, True])
# @pytest.mark.parametrize("swap_sq_sk", [True])
@pytest.mark.parametrize(
    "seqlen_q,seqlen_k",
    [
        (1, 239),
        (3, 799),
        (127, 512),
        (127, 513),
        (113, 203),
        (128, 217),
        (113, 211),
        (108, 256),
        (256, 512),
        (1023, 1024),
    ],
)
# TODO: add smaller page sizes when https://github.com/Dao-AILab/flash-attention/pull/824 is merged
@pytest.mark.parametrize("paged_kv_block_size", [None, 256, 512])
# @pytest.mark.parametrize("seqlen_q,seqlen_k", [(256, 128)])
def test_flash_attn_varlen_causal(
    seqlen_q, seqlen_k, swap_sq_sk, d, local, paged_kv_block_size, dtype
):
    if (
        max(seqlen_q, seqlen_k) >= 2048
        and torch.cuda.get_device_properties("cuda").total_memory <= 16 * 2**30
    ):
        pytest.skip()  # Reference implementation OOM
    if swap_sq_sk:
        seqlen_q, seqlen_k = seqlen_k, seqlen_q
    device = "cuda"
    causal = True
    # set seed
    torch.random.manual_seed(0)
    batch_size = 8
    nheads = 9
    window_size = (-1, -1) if not local else torch.randint(0, seqlen_k, (2,))
    q = torch.randn(batch_size, seqlen_q, nheads, d, device=device, dtype=dtype, requires_grad=True)
    if paged_kv_block_size is None:
        k = torch.randn(
            batch_size, seqlen_k, nheads, d, device=device, dtype=dtype, requires_grad=True
        )
        v = torch.randn(
            batch_size, seqlen_k, nheads, d, device=device, dtype=dtype, requires_grad=True
        )
        block_table = None
    else:
        k, v, block_table, k_cache_paged, v_cache_paged, num_blocks = _generate_block_kvcache(
            seqlen_k, paged_kv_block_size, batch_size, nheads, d, device, dtype
        )
    query_padding_mask = generate_random_padding_mask(seqlen_q, batch_size, device, mode="random")
    key_padding_mask = generate_random_padding_mask(seqlen_k, batch_size, device, mode="random")
    (
        q_unpad,
        k_unpad,
        v_unpad,
        cu_seqlens_q,
        cu_seqlens_k,
        max_seqlen_q,
        max_seqlen_k,
        q,
        k,
        v,
        output_pad_fn,
        dq_pad_fn,
        dk_pad_fn,
    ) = generate_qkv(q, k, v, query_padding_mask, key_padding_mask, kvpacked=False)
    out_unpad = flash_attn_varlen_func(
        q_unpad,
        k_unpad if paged_kv_block_size is None else k_cache_paged,
        v_unpad if paged_kv_block_size is None else v_cache_paged,
        cu_seqlens_q,
        cu_seqlens_k,
        max_seqlen_q,
        max_seqlen_k,
        0.0,
        causal=causal,
        window_size=window_size,
        block_table=block_table,
    )
    out = output_pad_fn(out_unpad)
    out_ref, attn_ref = attention_ref(
        q,
        k,
        v,
        query_padding_mask,
        key_padding_mask,
        None,
        0.0,
        None,
        causal=causal,
        window_size=window_size,
    )
    out_pt, attn_pt = attention_ref(
        q,
        k,
        v,
        query_padding_mask,
        key_padding_mask,
        None,
        0.0,
        None,
        causal=causal,
        window_size=window_size,
        upcast=False,
        reorder_ops=True,
    )
    print(f"Output max diff: {(out - out_ref).abs().max().item()}")
    print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
    print(f"Pytorch max diff: {(out_pt - out_ref).abs().max().item()}")
    print(f"Pytorch mean diff: {(out_pt - out_ref).abs().mean().item()}")
    g = torch.randn_like(out)
    do_o = (g.float() * out.float()).sum(-1)
    test_backward = block_table is None
    if test_backward:
        (
            dq_unpad,
            dk_unpad,
            dv_unpad,
        ) = torch.autograd.grad(out, (q_unpad, k_unpad, v_unpad), g)
        dq = dq_pad_fn(dq_unpad)
        dk = dk_pad_fn(dk_unpad)
        dv = dk_pad_fn(dv_unpad)
        (
            dq_ref,
            dk_ref,
            dv_ref,
        ) = torch.autograd.grad(out_ref, (q, k, v), g)
        (
            dq_pt,
            dk_pt,
            dv_pt,
        ) = torch.autograd.grad(out_pt, (q, k, v), g)
        print(f"dQ max diff: {(dq - dq_ref).abs().max().item()}")
        print(f"dK max diff: {(dk - dk_ref).abs().max().item()}")
        print(f"dV max diff: {(dv - dv_ref).abs().max().item()}")
        print(f"dQ mean diff: {(dq - dq_ref).abs().mean().item()}")
        print(f"dK mean diff: {(dk - dk_ref).abs().mean().item()}")
        print(f"dV mean diff: {(dv - dv_ref).abs().mean().item()}")
        print(f"dQ Pytorch max diff: {(dq_pt - dq_ref).abs().max().item()}")
        print(f"dK Pytorch max diff: {(dk_pt - dk_ref).abs().max().item()}")
        print(f"dV Pytorch max diff: {(dv_pt - dv_ref).abs().max().item()}")
        print(f"dQ Pytorch mean diff: {(dq_pt - dq_ref).abs().mean().item()}")
        print(f"dK Pytorch mean diff: {(dk_pt - dk_ref).abs().mean().item()}")
        print(f"dV Pytorch mean diff: {(dv_pt - dv_ref).abs().mean().item()}")
    # Check that FlashAttention's numerical error is at most twice the numerical error
    # of a Pytorch implementation.
    assert (out - out_ref).abs().max().item() <= 2 * (out_pt - out_ref).abs().max().item() + 1e-5
    if test_backward:
        assert (dq - dq_ref).abs().max().item() <= 2 * (dq_pt - dq_ref).abs().max().item() + 1e-5
        assert (dk - dk_ref).abs().max().item() <= 2 * (dk_pt - dk_ref).abs().max().item() + 1e-5
        assert (dv - dv_ref).abs().max().item() <= 2 * (dv_pt - dv_ref).abs().max().item() + 1e-5
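

# test_flash_attn_splitkv: exercises the split-KV code path (small batch, long key sequences) with
# optional causal/local masking, ALiBi, and deterministic mode, checking forward and backward
# against the PyTorch reference.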
@pytest.mark.parametrize("dtype", ([torch.float16] if is_sm75 else [torch.float16, torch.bfloat16]))
# @pytest.mark.parametrize("dtype", [torch.float16])
@pytest.mark.parametrize("deterministic", [False, True])
# @pytest.mark.parametrize("deterministic", [True])
@pytest.mark.parametrize("alibi", [False, True])
# @pytest.mark.parametrize("alibi", [True])
@pytest.mark.parametrize("local", [False, True])
# @pytest.mark.parametrize("local", [False])
@pytest.mark.parametrize("causal", [False, True])
# @pytest.mark.parametrize("causal", [True])
@pytest.mark.parametrize("d", [32, 40, 59, 64, 80, 96, 111, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize('d', [32, 64, 96, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize('d', [32, 40, 64, 80, 96, 128, 160, 192])
# @pytest.mark.parametrize('d', [32, 64, 96, 128, 160, 192])
# @pytest.mark.parametrize('d', [56, 80])
# @pytest.mark.parametrize("d", [64])
@pytest.mark.parametrize("swap_sq_sk", [False, True])
# @pytest.mark.parametrize("swap_sq_sk", [False])
@pytest.mark.parametrize(
    "seqlen_q,seqlen_k",
    [
        (3, 1024),
        (1, 339),
        (64, 800),
        (3, 799),
        (64, 2048),
        (16, 20000),
        (16, 100000),
        (128, 128),
        (256, 256),
    ],
)
# @pytest.mark.parametrize('seqlen_q,seqlen_k', [(256, 128)])
def test_flash_attn_splitkv(
    seqlen_q, seqlen_k, swap_sq_sk, d, causal, local, alibi, deterministic, dtype
):
    if swap_sq_sk:
        seqlen_q, seqlen_k = seqlen_k, seqlen_q
    device = "cuda"
    # set seed
    torch.random.manual_seed(0)
    batch_size = 1
    nheads = 12
    window_size = (-1, -1) if not local else torch.randint(0, seqlen_k, (2,))
    q = torch.randn(batch_size, seqlen_q, nheads, d, device=device, dtype=dtype, requires_grad=True)
    k = torch.randn(batch_size, seqlen_k, nheads, d, device=device, dtype=dtype, requires_grad=True)
    v = torch.randn(batch_size, seqlen_k, nheads, d, device=device, dtype=dtype, requires_grad=True)
    if alibi:
        alibi_slopes = torch.rand(batch_size, nheads, device=device, dtype=torch.float32) * 0.3
        attn_bias = attn_bias_from_alibi_slopes(alibi_slopes, seqlen_q, seqlen_k, causal=causal)
    else:
        alibi_slopes, attn_bias = None, None
    out, lse, _ = flash_attn_func(
        q,
        k,
        v,
        0.0,
        causal=causal,
        window_size=window_size,
        alibi_slopes=alibi_slopes,
        deterministic=deterministic,
        return_attn_probs=True,
    )
    out_ref, attn_ref = attention_ref(
        q, k, v, None, None, attn_bias, 0.0, None, causal=causal, window_size=window_size
    )
    out_pt, attn_pt = attention_ref(
        q,
        k,
        v,
        None,
        None,
        attn_bias,
        0.0,
        None,
        causal=causal,
        window_size=window_size,
        upcast=False,
        reorder_ops=True,
    )
    print(f"Output max diff: {(out - out_ref).abs().max().item()}")
    print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
    print(f"Pytorch max diff: {(out_pt - out_ref).abs().max().item()}")
    print(f"Pytorch mean diff: {(out_pt - out_ref).abs().mean().item()}")
    g = torch.randn_like(out)
    do_o = (g.float() * out.float()).sum(-1)
    (
        dq,
        dk,
        dv,
    ) = torch.autograd.grad(out, (q, k, v), g)
    (
        dq_ref,
        dk_ref,
        dv_ref,
    ) = torch.autograd.grad(out_ref, (q, k, v), g)
    (
        dq_pt,
        dk_pt,
        dv_pt,
    ) = torch.autograd.grad(out_pt, (q, k, v), g)
    print(f"dQ max diff: {(dq - dq_ref).abs().max().item()}")
    print(f"dK max diff: {(dk - dk_ref).abs().max().item()}")
    print(f"dV max diff: {(dv - dv_ref).abs().max().item()}")
    print(f"dQ mean diff: {(dq - dq_ref).abs().mean().item()}")
    print(f"dK mean diff: {(dk - dk_ref).abs().mean().item()}")
    print(f"dV mean diff: {(dv - dv_ref).abs().mean().item()}")
    print(f"dQ Pytorch max diff: {(dq_pt - dq_ref).abs().max().item()}")
    print(f"dK Pytorch max diff: {(dk_pt - dk_ref).abs().max().item()}")
    print(f"dV Pytorch max diff: {(dv_pt - dv_ref).abs().max().item()}")
    print(f"dQ Pytorch mean diff: {(dq_pt - dq_ref).abs().mean().item()}")
    print(f"dK Pytorch mean diff: {(dk_pt - dk_ref).abs().mean().item()}")
    print(f"dV Pytorch mean diff: {(dv_pt - dv_ref).abs().mean().item()}")
    # Check that FlashAttention's numerical error is at most twice the numerical error
    # of a Pytorch implementation.
    assert (out - out_ref).abs().max().item() <= 2 * (out_pt - out_ref).abs().max().item() + 1e-5
    mult = 2 if not alibi else 8
    assert (dq - dq_ref).abs().max().item() <= mult * (dq_pt - dq_ref).abs().max().item() + 2e-4
    assert (dk - dk_ref).abs().max().item() <= mult * (dk_pt - dk_ref).abs().max().item() + 2e-4
    assert (dv - dv_ref).abs().max().item() <= mult * (dv_pt - dv_ref).abs().max().item() + 2e-4
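

# test_flash_attn_kvcache: checks flash_attn_with_kvcache against a reference that updates the KV
# cache manually, covering rotary embeddings, paged KV, left-padding, cache_batch_idx, MQA/GQA,
# local attention, and ALiBi. Only the forward output and the updated cache contents are verified.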
# @pytest.mark.parametrize("dtype", ([torch.float16] if is_sm75 else [torch.float16, torch.bfloat16]))
@pytest.mark.parametrize("dtype", [torch.float16])
@pytest.mark.parametrize("num_splits", [1, 0])
# @pytest.mark.parametrize("num_splits", [1])
@pytest.mark.parametrize("mha_type", ["mha", "mqa", "gqa"])
# @pytest.mark.parametrize("mha_type", ["mha"])
@pytest.mark.parametrize("new_kv", [False, True])
# @pytest.mark.parametrize("new_kv", [False])
@pytest.mark.parametrize("alibi", [False, True])
# @pytest.mark.parametrize("alibi", [False])
@pytest.mark.parametrize("local", [False, True])
# @pytest.mark.parametrize("local", [False])
@pytest.mark.parametrize("causal", [False, True])
# @pytest.mark.parametrize("causal", [False])
@pytest.mark.parametrize("seqlen_new_eq_seqlen_q", [True, False])
# @pytest.mark.parametrize("seqlen_new_eq_seqlen_q", [True])
@pytest.mark.parametrize("rotary_interleaved", [False, True])
# @pytest.mark.parametrize("rotary_interleaved", [False])
@pytest.mark.parametrize("rotary_fraction", [0.0, 0.5, 1.0])
# @pytest.mark.parametrize("rotary_fraction", [0.0])
@pytest.mark.parametrize("paged_kv_block_size", [None, 256])
# @pytest.mark.parametrize("paged_kv_block_size", [256, 512])
# @pytest.mark.parametrize("paged_kv_block_size", [None])
@pytest.mark.parametrize("has_leftpad", [False, True])
# @pytest.mark.parametrize("has_leftpad", [True])
# @pytest.mark.parametrize("has_batch_idx", [False, True])
@pytest.mark.parametrize("has_batch_idx", [False])
@pytest.mark.parametrize("d", [32, 59, 64, 80, 128, 256])
# @pytest.mark.parametrize("d", [32, 64, 96, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize('d', [32, 40, 64, 80, 96, 128, 160, 192])
# @pytest.mark.parametrize('d', [56, 80])
# @pytest.mark.parametrize("d", [128])
@pytest.mark.parametrize(
    "seqlen_q,seqlen_k",
    [
        (1, 128),
        (1, 339),
        (3, 1024),
        (64, 800),
        (64, 256),
        (3, 799),
        (64, 2048),
        (16, 20000),
        (1, 128 * 1024),
        (16, 128 * 1024),
        (128, 128),
    ],
)
# @pytest.mark.parametrize('seqlen_q,seqlen_k', [(256, 128)])
def test_flash_attn_kvcache(
    seqlen_q,
    seqlen_k,
    d,
    has_batch_idx,
    has_leftpad,
    paged_kv_block_size,
    rotary_fraction,
    rotary_interleaved,
    seqlen_new_eq_seqlen_q,
    causal,
    local,
    alibi,
    new_kv,
    mha_type,
    num_splits,
    dtype,
):
    if seqlen_q > seqlen_k and new_kv:
        pytest.skip()
    if not new_kv and rotary_fraction > 0.0:
        pytest.skip()
    if has_batch_idx and paged_kv_block_size is not None:
        pytest.skip()
    if has_leftpad and paged_kv_block_size is not None:
        pytest.skip()
    device = "cuda"
    # set seed
    torch.random.manual_seed(0)
    batch_size = 2
    batch_size_cache = batch_size if not has_batch_idx else batch_size * 2
    nheads = 6
    # rotary_dim must be a multiple of 16, and must be <= d
    rotary_dim = math.floor(int(rotary_fraction * d) / 16) * 16
    nheads_k = nheads if mha_type == "mha" else (1 if mha_type == "mqa" else 3)
    assert nheads % nheads_k == 0
    window_size = (-1, -1) if not local else torch.randint(0, seqlen_k, (2,))
    q = torch.randn(batch_size, seqlen_q, nheads, d, device=device, dtype=dtype)
    seqlen_new = seqlen_q if seqlen_new_eq_seqlen_q else torch.randint(1, seqlen_q + 1, (1,)).item()
    if new_kv:
        k = torch.randn(batch_size, seqlen_new, nheads_k, d, device=device, dtype=dtype)
        v = torch.randn(batch_size, seqlen_new, nheads_k, d, device=device, dtype=dtype)
    else:
        k, v = None, None
    if paged_kv_block_size is None:
        k_cache = torch.randn(batch_size_cache, seqlen_k, nheads_k, d, device=device, dtype=dtype)
        v_cache = torch.randn(batch_size_cache, seqlen_k, nheads_k, d, device=device, dtype=dtype)
        block_table = None
    else:
        (
            k_cache,
            v_cache,
            block_table,
            k_cache_paged,
            v_cache_paged,
            num_blocks,
        ) = _generate_block_kvcache(
            seqlen_k, paged_kv_block_size, batch_size, nheads_k, d, device, dtype
        )
    cache_seqlens = torch.randint(
        0 if new_kv else 1,
        # If we don't use seqlen_q in the case of causal and rotary, cos/sin won't be long enough
        (
            (seqlen_k - (seqlen_q if (causal or local) and rotary_dim > 1 else seqlen_new) + 1)
            if new_kv
            else (seqlen_k + 1)
        ),
        (batch_size,),
        dtype=torch.int32,
        device=device,
    )
    if has_leftpad:
        cache_leftpad = torch.cat([torch.randint(0, cache_seqlens[i].item(), (1,), dtype=torch.int32, device=device)
                                   if cache_seqlens[i].item() > 0 else torch.zeros(1, dtype=torch.int32, device=device)
                                   for i in range(batch_size)])
    else:
        cache_leftpad = None
    arange = rearrange(torch.arange(seqlen_k, device=device), "s -> 1 s")
    cache_seqlens_expanded = rearrange(cache_seqlens, "b -> b 1")
    key_padding_mask = arange < cache_seqlens_expanded + (seqlen_new if new_kv else 0)
    if has_leftpad:
        key_padding_mask = torch.logical_and(
            key_padding_mask, arange >= cache_leftpad.unsqueeze(-1).expand(-1, seqlen_k)
        )
    if has_batch_idx:
        cache_batch_idx = torch.randperm(batch_size_cache, dtype=torch.int32, device=device)[
            :batch_size
        ]
    else:
        cache_batch_idx = None
    if alibi:
        alibi_slopes = torch.rand(batch_size, nheads, device=device, dtype=torch.float32) * 0.3
        attn_bias = attn_bias_from_alibi_slopes(
            alibi_slopes, seqlen_q, seqlen_k, None, key_padding_mask, causal=causal, key_leftpad=cache_leftpad
        )
    else:
        alibi_slopes, attn_bias = None, None
    # cache_seqlens = torch.tensor([64], dtype=torch.int32, device=device)
    if rotary_dim > 0:
        angle = (
            torch.rand(
                seqlen_k if paged_kv_block_size is None else num_blocks * paged_kv_block_size,
                rotary_dim // 2,
                device=device,
            )
            * 2
            * math.pi
        )
        cos = torch.cos(angle).to(dtype=dtype)
        sin = torch.sin(angle).to(dtype=dtype)
        if causal or local:
            q_ro = apply_rotary_emb(
                q, cos, sin, seqlen_offsets=cache_seqlens, interleaved=rotary_interleaved
            )
        else:
            q_ro = rearrange(
                apply_rotary_emb(
                    rearrange(q, "b s h d -> b 1 (s h) d"),
                    cos,
                    sin,
                    seqlen_offsets=cache_seqlens,
                    interleaved=rotary_interleaved,
                ),
                "b 1 (s h) d -> b s h d",
                s=seqlen_q,
            )
        # q_ro = q
        k_ro = apply_rotary_emb(
            k, cos, sin, seqlen_offsets=cache_seqlens, interleaved=rotary_interleaved
        )
    else:
        cos, sin = None, None
        q_ro, k_ro = q, k
    # k_cache[:, 64:] = -1
    k_cache_ref = (
        k_cache if not has_batch_idx else k_cache[cache_batch_idx.to(dtype=torch.long)]
    ).clone()
    v_cache_ref = (
        v_cache if not has_batch_idx else v_cache[cache_batch_idx.to(dtype=torch.long)]
    ).clone()
    if new_kv:
        update_mask = torch.logical_and(
            cache_seqlens_expanded <= arange, arange < cache_seqlens_expanded + seqlen_new
        )
        k_cache_ref[update_mask] = rearrange(k_ro, "b s ... -> (b s) ...")
        v_cache_ref[update_mask] = rearrange(v, "b s ... -> (b s) ...")
    k_cache_rep = repeat(k_cache_ref, "b s h d -> b s (h g) d", g=nheads // nheads_k)
    v_cache_rep = repeat(v_cache_ref, "b s h d -> b s (h g) d", g=nheads // nheads_k)
    out = flash_attn_with_kvcache(
        q,
        k_cache if paged_kv_block_size is None else k_cache_paged,
        v_cache if paged_kv_block_size is None else v_cache_paged,
        k,
        v,
        rotary_cos=cos,
        rotary_sin=sin,
        cache_seqlens=cache_seqlens,
        cache_batch_idx=cache_batch_idx,
        cache_leftpad=cache_leftpad,
        block_table=block_table,
        causal=causal,
        window_size=window_size,
        rotary_interleaved=rotary_interleaved,
        alibi_slopes=alibi_slopes,
        num_splits=num_splits,
    )
    # out = flash_attn_with_kvcache(
    #     q, k_cache, v_cache, cache_seqlens=cache_seqlens, causal=causal, window_size=window_size
    # )
    # out = flash_attn_with_kvcache(q, k_cache, v_cache, causal=causal, window_size=window_size)
    # qk = torch.einsum("bqhd,bkhd->bhqk", q, k_cache_ref)
    # m = qk.amax(-1, keepdim=True)
    # s_tmp = torch.exp((qk - m) / math.sqrt(d))
    # o1 = torch.einsum('bhst,bthd->bshd', s_tmp, v_cache_ref)
    # lse_ref = torch.logsumexp(qk / math.sqrt(d), -1)
    # probs = torch.softmax(qk, dim=-1)
    out_ref, _ = attention_ref(
        q_ro,
        k_cache_rep,
        v_cache_rep,
        None,
        key_padding_mask,
        attn_bias,
        0.0,
        None,
        causal=causal,
        window_size=window_size,
        key_leftpad=cache_leftpad,
    )
    out_pt, _ = attention_ref(
        q_ro,
        k_cache_rep,
        v_cache_rep,
        None,
        key_padding_mask,
        attn_bias,
        0.0,
        None,
        causal=causal,
        window_size=window_size,
        upcast=False,
        reorder_ops=True,
        key_leftpad=cache_leftpad,
    )
    print(f"Output max diff: {(out - out_ref).abs().max().item()}")
    print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
    print(f"Pytorch max diff: {(out_pt - out_ref).abs().max().item()}")
    print(f"Pytorch mean diff: {(out_pt - out_ref).abs().mean().item()}")
    # Check that FlashAttention's numerical error is at most twice the numerical error
    # of a Pytorch implementation.
    if new_kv:
        if paged_kv_block_size is None:
            k_cache_select = (
                k_cache if not has_batch_idx else k_cache[cache_batch_idx.to(dtype=torch.long)]
            )
            v_cache_select = (
                v_cache if not has_batch_idx else v_cache[cache_batch_idx.to(dtype=torch.long)]
            )
        else:
            k_cache_select = rearrange(
                k_cache_paged[block_table.to(dtype=torch.long).flatten()],
                "(b nblocks) block_size ... -> b (nblocks block_size) ...",
                b=batch_size,
            )[:, :seqlen_k]
            v_cache_select = rearrange(
                v_cache_paged[block_table.to(dtype=torch.long).flatten()],
                "(b nblocks) block_size ... -> b (nblocks block_size) ...",
                b=batch_size,
            )[:, :seqlen_k]
        assert torch.allclose(k_cache_select, k_cache_ref, rtol=1e-3, atol=1e-3)
        assert torch.equal(v_cache_select, v_cache_ref)
    mult = 3 if not alibi else 5
    assert (out - out_ref).abs().max().item() <= mult * (out_pt - out_ref).abs().max().item() + 1e-5
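

# _generate_block_kvcache: helper that builds a paged KV cache (k_cache_paged / v_cache_paged plus a
# random block_table) and the equivalent contiguous k_cache / v_cache views used by the references.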
def _generate_block_kvcache(seqlen_k, paged_kv_block_size, batch_size, nheads_k, d, device, dtype):
    num_blocks = math.ceil(seqlen_k / paged_kv_block_size) * batch_size * 3
    k_cache_paged = torch.randn(
        num_blocks, paged_kv_block_size, nheads_k, d, device=device, dtype=dtype
    )
    v_cache_paged = torch.randn(
        num_blocks, paged_kv_block_size, nheads_k, d, device=device, dtype=dtype
    )
    block_table = rearrange(
        torch.randperm(num_blocks, dtype=torch.int32, device=device),
        "(b nblocks) -> b nblocks",
        b=batch_size,
    )
    k_cache = rearrange(
        # pytorch 1.12 doesn't have indexing with int32
        k_cache_paged[block_table.to(dtype=torch.long).flatten()],
        "(b nblocks) block_size ... -> b (nblocks block_size) ...",
        b=batch_size,
    )[:, :seqlen_k]
    v_cache = rearrange(
        v_cache_paged[block_table.to(dtype=torch.long).flatten()],
        "(b nblocks) block_size ... -> b (nblocks block_size) ...",
        b=batch_size,
    )[:, :seqlen_k]
    return k_cache, v_cache, block_table, k_cache_paged, v_cache_paged, num_blocks
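

# test_flash_attn_race_condition: runs the same forward/backward many times with a fixed seed and a
# large batch to catch nondeterministic race conditions; outputs, LSE, and dK/dV must match exactly,
# and dQ must match within a small numerically-motivated tolerance.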
# @pytest.mark.parametrize("dtype", ([torch.float16] if is_sm75 else [torch.float16, torch.bfloat16]))
@pytest.mark.parametrize("dtype", [torch.float16])
@pytest.mark.parametrize("causal", [False, True])
# @pytest.mark.parametrize('causal', [True])
@pytest.mark.parametrize("d", [32, 40, 59, 64, 80, 96, 111, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize('d', [32, 56, 64, 80, 96, 128])
# @pytest.mark.parametrize("d", [32, 64, 96, 128, 160, 192])
# @pytest.mark.parametrize('d', [128])
@pytest.mark.parametrize(
    "seqlen_q,seqlen_k",
    [
        (1, 239),
        (239, 1),
        (3, 799),
        (799, 3),
        (1024, 128),
        (97, 97),
        (128, 128),
        (200, 200),
        (256, 256),
        (257, 257),
        (384, 384),
        (512, 512),
        (768, 768),
        (1024, 1024),
    ],
)
@pytest.mark.parametrize("dropout_p", [0.0, 0.17])
# @pytest.mark.parametrize("dropout_p", [0.0])
def test_flash_attn_race_condition(seqlen_q, seqlen_k, d, dropout_p, causal, dtype):
    device = "cuda"
    # set seed
    torch.random.manual_seed(0)
    batch_size = 60  # Sometimes we need large batch size for the race conditions to trigger
    nheads = 4
    q = torch.randn(batch_size, seqlen_q, nheads, d, device=device, dtype=dtype, requires_grad=True)
    k = torch.randn(batch_size, seqlen_k, nheads, d, device=device, dtype=dtype, requires_grad=True)
    v = torch.randn(batch_size, seqlen_k, nheads, d, device=device, dtype=dtype, requires_grad=True)
    torch.random.manual_seed(42)
    out0, lse0, _ = flash_attn_func(q, k, v, dropout_p, causal=causal, return_attn_probs=True)
    g = torch.randn_like(out0)
    if (d <= MAX_HEADDIM_SM8x or dropout_p == 0) or (is_sm80 or is_sm90):
        (
            dq0,
            dk0,
            dv0,
        ) = torch.autograd.grad(out0, (q, k, v), g)
        # Numerical error if we just do any arithmetic on dq
        dq_atol = 2 * ((dq0 + 0.3 - 0.3) - dq0).abs().max().item()
    for i in range(250):
        torch.random.manual_seed(42)
        out, lse, _ = flash_attn_func(q, k, v, dropout_p, causal=causal, return_attn_probs=True)
        assert torch.equal(out, out0)
        assert torch.equal(lse, lse0)
        if (d <= MAX_HEADDIM_SM8x or dropout_p == 0) or (is_sm80 or is_sm90):
            (
                dq,
                dk,
                dv,
            ) = torch.autograd.grad(out, (q, k, v), g)
            dq_equal = torch.allclose(dq, dq0, atol=dq_atol)
            if not dq_equal:
                print(f"Iter {i}, {dq_atol = }, dQ max diff: {(dq - dq0).abs().max().item()}")
            assert torch.equal(dv, dv0)
            assert torch.equal(dk, dk0)
            assert dq_equal
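

# test_flash_attn_bwd_overflow: regression test for NaNs in dQ when elements beyond seqlen_k were
# not masked (seqlen % 128 != 0); q/k/v are scaled up to make numerical overflow more likely.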
@pytest.mark.parametrize("dtype", [torch.float16])
@pytest.mark.parametrize("causal", [False, True])
# @pytest.mark.parametrize('causal', [False])
@pytest.mark.parametrize("d", [16, 32, 64])
# @pytest.mark.parametrize('d', [16])
@pytest.mark.parametrize("seqlen", [1, 2, 5, 17, 128])
# @pytest.mark.parametrize('seqlen', [2])
def test_flash_attn_bwd_overflow(seqlen, d, causal, dtype):
    """We previously had a bug where not masking elements beyond seqlen_k caused NaN in dQ,
    in the case where seqlen % 128 != 0.
    """
    device = "cuda"
    # set seed
    torch.random.manual_seed(0)
    batch_size = 2
    nheads = 5
    q = torch.randn([batch_size, seqlen, nheads, d], dtype=dtype, device="cuda") * 5
    k, v = [
        torch.randn([batch_size, seqlen, nheads, d], dtype=dtype, device="cuda") * 3
        for _ in range(2)
    ]
    q.requires_grad_(True)
    k.requires_grad_(True)
    v.requires_grad_(True)
    out = flash_attn_func(q, k, v, causal=causal)
    g = torch.randn_like(out)
    out.backward(g)
    q_pt = q.detach().clone().requires_grad_(True)
    k_pt = k.detach().clone().requires_grad_(True)
    v_pt = v.detach().clone().requires_grad_(True)
    out_pt, _ = attention_ref(q_pt, k_pt, v_pt, causal=causal, upcast=False, reorder_ops=True)
    out_pt.backward(g)
    q_ref = q.detach().clone().requires_grad_(True)
    k_ref = k.detach().clone().requires_grad_(True)
    v_ref = v.detach().clone().requires_grad_(True)
    out_ref, attn_ref = attention_ref(q_ref, k_ref, v_ref, causal=causal)
    out_ref.backward(g)
    print(f"dQ max diff: {(q.grad - q_ref.grad).abs().max().item()}")
    print(f"dK max diff: {(k.grad - k_ref.grad).abs().max().item()}")
    print(f"dV max diff: {(v.grad - v_ref.grad).abs().max().item()}")
    print(f"dQ Pytorch max diff: {(q_pt.grad - q_ref.grad).abs().max().item()}")
    print(f"dK Pytorch max diff: {(k_pt.grad - k_ref.grad).abs().max().item()}")
    print(f"dV Pytorch max diff: {(v_pt.grad - v_ref.grad).abs().max().item()}")
    assert (out - out_ref).abs().max().item() <= 2 * (out_pt - out_ref).abs().max().item()
    assert (q.grad - q_ref.grad).abs().max().item() <= 5 * (
        q_pt.grad - q_ref.grad
    ).abs().max().item() + 1e-3
    assert (k.grad - k_ref.grad).abs().max().item() <= 5 * (
        k_pt.grad - k_ref.grad
    ).abs().max().item() + 1e-3
    assert (v.grad - v_ref.grad).abs().max().item() <= 5 * (
        v_pt.grad - v_ref.grad
    ).abs().max().item() + 1e-3
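

# test_flash_attn_bwd_transpose: regression test for the backward pass using the wrong strides of
# dout; the gradient g is deliberately made non-contiguous by slicing a larger tensor.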
@pytest.mark.parametrize("dtype", ([torch.float16] if is_sm75 else [torch.float16, torch.bfloat16]))
# @pytest.mark.parametrize('dtype', [torch.bfloat16])
@pytest.mark.parametrize("causal", [False, True])
# @pytest.mark.parametrize('causal', [False])
@pytest.mark.parametrize("d", [64, 128])
# @pytest.mark.parametrize('d', [64])
@pytest.mark.parametrize("seqlen", [97, 128, 200, 256])
# @pytest.mark.parametrize('seqlen', [128])
def test_flash_attn_bwd_transpose(seqlen, d, causal, dtype):
    """We previously had a bug where we were using the wrong strides of dout, which shows up
    when dout is not contiguous.
    """
    device = "cuda"
    # set seed
    torch.random.manual_seed(0)
    batch_size = 5
    nheads = 2
    q, k, v = [
        torch.randn([batch_size, seqlen, nheads, d], dtype=dtype, device="cuda", requires_grad=True)
        for _ in range(3)
    ]
    out = rearrange(flash_attn_func(q, k, v, causal=causal), "b s ... -> s b ...")
    # So g is not contiguous
    g = torch.randn(seqlen, 2 * batch_size, nheads, d, dtype=dtype, device="cuda")[:, ::2]
    out.backward(g)
    q_pt = q.detach().clone().requires_grad_(True)
    k_pt = k.detach().clone().requires_grad_(True)
    v_pt = v.detach().clone().requires_grad_(True)
    out_pt, attn_pt = attention_ref(q_pt, k_pt, v_pt, causal=causal, upcast=False, reorder_ops=True)
    out_pt = rearrange(out_pt, "b s ... -> s b ...")
    out_pt.backward(g)
    q_ref = q.detach().clone().requires_grad_(True)
    k_ref = k.detach().clone().requires_grad_(True)
    v_ref = v.detach().clone().requires_grad_(True)
    out_ref, attn_ref = attention_ref(q_ref, k_ref, v_ref, causal=causal)
    out_ref = rearrange(out_ref, "b s ... -> s b ...")
    out_ref.backward(g)
    print(f"dQ max diff: {(q.grad - q_ref.grad).abs().max().item()}")
    print(f"dK max diff: {(k.grad - k_ref.grad).abs().max().item()}")
    print(f"dV max diff: {(v.grad - v_ref.grad).abs().max().item()}")
    print(f"dQ Pytorch max diff: {(q_pt.grad - q_ref.grad).abs().max().item()}")
    print(f"dK Pytorch max diff: {(k_pt.grad - k_ref.grad).abs().max().item()}")
    print(f"dV Pytorch max diff: {(v_pt.grad - v_ref.grad).abs().max().item()}")
    assert (out - out_ref).abs().max().item() <= 2 * (out_pt - out_ref).abs().max().item()
    assert (q.grad - q_ref.grad).abs().max().item() <= 2 * (
        q_pt.grad - q_ref.grad
    ).abs().max().item()
    assert (k.grad - k_ref.grad).abs().max().item() <= 2 * (
        k_pt.grad - k_ref.grad
    ).abs().max().item()
    assert (v.grad - v_ref.grad).abs().max().item() <= 2 * (
        v_pt.grad - v_ref.grad
    ).abs().max().item()
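

# test_flash_attn_bwd_varlen_overflow: varlen version of the overflow regression test; it only
# checks that no gradient contains NaN when seqlens are ragged and very short on the key side.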
@pytest.mark.parametrize("dtype", [torch.float16])
@pytest.mark.parametrize("causal", [False, True])
# @pytest.mark.parametrize('causal', [False])
@pytest.mark.parametrize("d", [16, 32, 64])
# @pytest.mark.parametrize('d', [16])
def test_flash_attn_bwd_varlen_overflow(d, causal, dtype):
    """We previously had a bug where not masking elements beyond seqlen_k caused NaN in dQ,
    in the case where seqlen % 128 != 0 or varlen.
    """
    device = "cuda"
    # set seed
    torch.random.manual_seed(0)
    nheads = 5
    q_cuseqlen = torch.tensor([0, 76, 110, 256], device=device, dtype=torch.int32)
    k_cuseqlen = torch.tensor([0, 1, 2, 3], device=device, dtype=torch.int32)
    Mq = 256
    Mk = 3
    q = torch.randn([Mq, nheads, d], dtype=dtype, device=device) * 3
    k, v = [torch.randn([Mk, nheads, d], dtype=dtype, device=device) * 3 for _ in range(2)]
    q.requires_grad_(True)
    k.requires_grad_(True)
    v.requires_grad_(True)
    out = flash_attn_varlen_func(q, k, v, q_cuseqlen, k_cuseqlen, Mq, Mk, causal=causal)
    g = torch.randn_like(out)
    out.backward(g)
    assert not q.grad.isnan().any()
    assert not k.grad.isnan().any()
    assert not v.grad.isnan().any()
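

# test_flash_attn_deterministic: with deterministic=True, repeated backward passes through the same
# graph must produce bit-identical dq/dk/dv.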
@pytest.mark.parametrize("dtype", ([torch.float16] if is_sm75 else [torch.float16, torch.bfloat16]))
# @pytest.mark.parametrize("dtype", [torch.bfloat16])
@pytest.mark.parametrize("local", [False, True])
# @pytest.mark.parametrize("local", [True])
@pytest.mark.parametrize("causal", [False, True])
# @pytest.mark.parametrize("causal", [True])
@pytest.mark.parametrize("d", [32, 40, 59, 64, 80, 96, 111, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize("d", [32, 64, 96, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize('d', [32, 40, 64, 80, 96, 128, 160, 192])
# @pytest.mark.parametrize('d', [32, 64, 96, 128, 160, 192])
# @pytest.mark.parametrize('d', [56, 80])
# @pytest.mark.parametrize("d", [64])
@pytest.mark.parametrize("swap_sq_sk", [False, True])
# @pytest.mark.parametrize("swap_sq_sk", [False])
@pytest.mark.parametrize(
    "seqlen_q,seqlen_k",
    [
        (1, 239),
        (3, 799),
        (127, 512),
        (127, 513),
        (113, 203),
        (128, 217),
        (113, 211),
        (108, 256),
        (256, 512),
        (1023, 1024),
    ],
)
# @pytest.mark.parametrize('seqlen_q,seqlen_k', [(256, 128)])
def test_flash_attn_deterministic(seqlen_q, seqlen_k, swap_sq_sk, d, causal, local, dtype):
    if (
        max(seqlen_q, seqlen_k) >= 2048
        and torch.cuda.get_device_properties("cuda").total_memory <= 16 * 2**30
    ):
        pytest.skip()  # Reference implementation OOM
    if swap_sq_sk:
        seqlen_q, seqlen_k = seqlen_k, seqlen_q
    device = "cuda"
    # set seed
    torch.random.manual_seed(0)
    batch_size = 4
    nheads = 9
    window_size = (-1, -1) if not local else torch.randint(0, seqlen_k, (2,))
    q = torch.randn(batch_size, seqlen_q, nheads, d, device=device, dtype=dtype, requires_grad=True)
    k = torch.randn(batch_size, seqlen_k, nheads, d, device=device, dtype=dtype, requires_grad=True)
    v = torch.randn(batch_size, seqlen_k, nheads, d, device=device, dtype=dtype, requires_grad=True)
    out = flash_attn_func(q, k, v, 0.0, causal=causal, window_size=window_size, deterministic=True)
    g = torch.randn_like(out)
    dq0, dk0, dv0 = torch.autograd.grad(out, (q, k, v), g, retain_graph=True)
    for _ in range(50):
        dq, dk, dv = torch.autograd.grad(out, (q, k, v), g, retain_graph=True)
        assert torch.equal(dv, dv0)
        assert torch.equal(dk, dk0)
        assert torch.equal(dq, dq0)
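

# test_flash_attn_varlen_deterministic: same determinism check as above, but for the varlen kernel
# operating on unpadded tensors produced by generate_qkv.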
@pytest.mark.parametrize("dtype", ([torch.float16] if is_sm75 else [torch.float16, torch.bfloat16]))
# @pytest.mark.parametrize("dtype", [torch.bfloat16])
@pytest.mark.parametrize("local", [False, True])
# @pytest.mark.parametrize("local", [True])
@pytest.mark.parametrize("causal", [False, True])
# @pytest.mark.parametrize("causal", [True])
@pytest.mark.parametrize("d", [32, 40, 59, 64, 80, 96, 111, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize("d", [32, 64, 96, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize('d', [32, 40, 64, 80, 96, 128, 160, 192])
# @pytest.mark.parametrize('d', [32, 64, 96, 128, 160, 192])
# @pytest.mark.parametrize('d', [56, 80])
# @pytest.mark.parametrize("d", [64])
@pytest.mark.parametrize("swap_sq_sk", [False, True])
# @pytest.mark.parametrize("swap_sq_sk", [True])
@pytest.mark.parametrize(
    "seqlen_q,seqlen_k",
    [
        (1, 239),
        (3, 799),
        (127, 512),
        (127, 513),
        (113, 203),
        (128, 217),
        (113, 211),
        (108, 256),
        (256, 512),
        (1023, 1024),
    ],
)
# @pytest.mark.parametrize("seqlen_q,seqlen_k", [(256, 128)])
def test_flash_attn_varlen_deterministic(seqlen_q, seqlen_k, swap_sq_sk, d, causal, local, dtype):
    if (
        max(seqlen_q, seqlen_k) >= 2048
        and torch.cuda.get_device_properties("cuda").total_memory <= 16 * 2**30
    ):
        pytest.skip()  # Reference implementation OOM
    if swap_sq_sk:
        seqlen_q, seqlen_k = seqlen_k, seqlen_q
    device = "cuda"
    # set seed
    torch.random.manual_seed(0)
    batch_size = 2
    nheads = 9
    window_size = (-1, -1) if not local else torch.randint(0, seqlen_k, (2,))
    q = torch.randn(batch_size, seqlen_q, nheads, d, device=device, dtype=dtype, requires_grad=True)
    k = torch.randn(batch_size, seqlen_k, nheads, d, device=device, dtype=dtype, requires_grad=True)
    v = torch.randn(batch_size, seqlen_k, nheads, d, device=device, dtype=dtype, requires_grad=True)
    query_padding_mask = generate_random_padding_mask(seqlen_q, batch_size, device, mode="random")
    key_padding_mask = generate_random_padding_mask(seqlen_k, batch_size, device, mode="random")
    (
        q_unpad,
        k_unpad,
        v_unpad,
        cu_seqlens_q,
        cu_seqlens_k,
        max_seqlen_q,
        max_seqlen_k,
        q,
        k,
        v,
        output_pad_fn,
        dq_pad_fn,
        dk_pad_fn,
    ) = generate_qkv(q, k, v, query_padding_mask, key_padding_mask, kvpacked=False)
    out = flash_attn_varlen_func(
        q_unpad,
        k_unpad,
        v_unpad,
        cu_seqlens_q,
        cu_seqlens_k,
        max_seqlen_q,
        max_seqlen_k,
        0.0,
        causal=causal,
        window_size=window_size,
        deterministic=True,
    )
    g = torch.randn_like(out)
    dq0, dk0, dv0 = torch.autograd.grad(out, (q_unpad, k_unpad, v_unpad), g, retain_graph=True)
    for _ in range(50):
        dq, dk, dv = torch.autograd.grad(out, (q_unpad, k_unpad, v_unpad), g, retain_graph=True)
        assert torch.equal(dv, dv0)
        assert torch.equal(dk, dk0)
        assert torch.equal(dq, dq0)