- # Copyright (c) 2023, Tri Dao.
- from typing import Optional, Sequence, Tuple, Union
- import torch
- import torch.nn as nn
- import os
- # isort: off
- # We need to import the CUDA kernels after importing torch
- USE_TRITON_ROCM = os.getenv("FLASH_ATTENTION_TRITON_AMD_ENABLE", "FALSE") == "TRUE"
- if USE_TRITON_ROCM:
- from .flash_attn_triton_amd import interface_fa as flash_attn_gpu
- else:
- import flash_attn_2_cuda as flash_attn_gpu
- # isort: on
- def maybe_contiguous(x):
- return x.contiguous() if x is not None and x.stride(-1) != 1 else x
- def _get_block_size_n(device, head_dim, is_dropout, is_causal):
- # This should match the block sizes in the CUDA kernel
- assert head_dim <= 256
- major, minor = torch.cuda.get_device_capability(device)
- is_sm8x = major == 8 and minor > 0 # Only include sm86 and sm89, exclude sm80 (A100)
- is_sm80 = major == 8 and minor == 0
- is_sm90 = major == 9 and minor == 0
- if head_dim <= 32:
- return 128
- if head_dim <= 64:
- return 128 if not is_dropout else 64
- elif head_dim <= 96:
- return 64
- elif head_dim <= 128:
- if is_sm8x:
- return 64 if (not is_dropout and is_causal) else 32
- else:
- return 64 if not is_dropout else 32
- elif head_dim <= 160:
- if is_sm8x:
- return 64
- else:
- return 32
- elif head_dim <= 192:
- return 64
- elif head_dim <= 224:
- return 64
- elif head_dim <= 256:
- return 64
- def round_multiple(x, m):
- return (x + m - 1) // m * m
- # torch.compile() support is only enabled for pytorch >= 2.4
- # The reason for this is that we are using the new custom_op and register_fake
- # APIs, which support inplace modification of inputs in the function itself
- if torch.__version__ >= "2.4.0":
- _torch_custom_op_wrapper = torch.library.custom_op
- _torch_register_fake_wrapper = torch.library.register_fake
- else:
- def noop_custom_op_wrapper(name, fn=None, /, *, mutates_args, device_types=None, schema=None):
- def wrap(func):
- return func
- if fn is None:
- return wrap
- return fn
- def noop_register_fake_wrapper(op, fn=None, /, *, lib=None, _stacklevel=1):
- def wrap(func):
- return func
- if fn is None:
- return wrap
- return fn
- _torch_custom_op_wrapper = noop_custom_op_wrapper
- _torch_register_fake_wrapper = noop_register_fake_wrapper
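- # Illustrative sketch (not from the original file) of what this gating enables: on
- # torch >= 2.4 the forward/backward ops below are registered as proper custom ops, so the
- # public functions in this module can be captured by torch.compile without graph breaks.
- # Assuming a CUDA build of this package:
- #
- #     compiled_attn = torch.compile(flash_attn_func)
- #     out = compiled_attn(q, k, v, causal=True)  # q, k, v: (batch, seqlen, nheads, headdim), fp16/bf16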
- @_torch_custom_op_wrapper("flash_attn::_flash_attn_forward", mutates_args=(), device_types="cuda")
- def _flash_attn_forward(
- q: torch.Tensor,
- k: torch.Tensor,
- v: torch.Tensor,
- dropout_p: float,
- softmax_scale: float,
- causal: bool,
- window_size_left: int,
- window_size_right: int,
- softcap: float,
- alibi_slopes: Optional[torch.Tensor],
- return_softmax: bool
- ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
- q, k, v = [maybe_contiguous(x) for x in (q, k, v)]
- out, softmax_lse, S_dmask, rng_state = flash_attn_gpu.fwd(
- q,
- k,
- v,
- None,
- alibi_slopes,
- dropout_p,
- softmax_scale,
- causal,
- window_size_left,
- window_size_right,
- softcap,
- return_softmax,
- None,
- )
- return out, softmax_lse, S_dmask, rng_state
- @_torch_register_fake_wrapper("flash_attn::_flash_attn_forward")
- def _flash_attn_forward_fake(
- q: torch.Tensor,
- k: torch.Tensor,
- v: torch.Tensor,
- dropout_p: float,
- softmax_scale: float,
- causal: bool,
- window_size_left: int,
- window_size_right: int,
- softcap: float,
- alibi_slopes: Optional[torch.Tensor],
- return_softmax: bool
- ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
- q, k, v = [maybe_contiguous(x) for x in (q, k, v)]
- batch_size, seqlen_q, num_heads, head_size = q.shape
- seqlen_k = k.shape[1]
- out = torch.empty_like(q)
- softmax_lse = torch.empty((batch_size, num_heads, seqlen_q), dtype=torch.float32, device=q.device, layout=q.layout)
- p = torch.empty((0,), dtype=q.dtype, device=q.device, layout=q.layout)
- if return_softmax:
- p = torch.empty((batch_size, num_heads, round_multiple(seqlen_q, 128), round_multiple(seqlen_k, 128)), dtype=q.dtype, device=q.device, layout=q.layout)
- rng_state = torch.empty((2,), dtype=torch.int64, device=q.device)
- return out, softmax_lse, p, rng_state
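- # Illustrative sketch of what the fake (meta) registration above is for: during tracing,
- # shapes propagate through the op without launching the CUDA kernel. Assuming torch >= 2.4
- # (FakeTensorMode is a private torch API):
- #
- #     from torch._subclasses.fake_tensor import FakeTensorMode
- #     with FakeTensorMode():
- #         q = torch.empty(2, 1024, 8, 64, dtype=torch.float16, device="cuda")
- #         out, lse, p, rng = _flash_attn_forward_fake(q, q, q, 0.0, 0.125, True, -1, -1, 0.0, None, False)
- #     # out.shape == (2, 1024, 8, 64); lse.shape == (2, 8, 1024)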
- if torch.__version__ >= "2.4.0":
- _wrapped_flash_attn_forward = torch.ops.flash_attn._flash_attn_forward
- else:
- _wrapped_flash_attn_forward = _flash_attn_forward
- @_torch_custom_op_wrapper("flash_attn::_flash_attn_varlen_forward", mutates_args=(), device_types="cuda")
- def _flash_attn_varlen_forward(
- q: torch.Tensor,
- k: torch.Tensor,
- v: torch.Tensor,
- cu_seqlens_q: torch.Tensor,
- cu_seqlens_k: torch.Tensor,
- max_seqlen_q: int,
- max_seqlen_k: int,
- dropout_p: float,
- softmax_scale: float,
- causal: bool,
- window_size_left: int = -1,
- window_size_right: int = -1,
- softcap: float = 0.0,
- alibi_slopes: Optional[torch.Tensor] = None,
- return_softmax: bool = False,
- block_table: Optional[torch.Tensor] = None,
- leftpad_k: Optional[torch.Tensor] = None,
- seqused_k: Optional[torch.Tensor] = None,
- ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
- q, k, v = [maybe_contiguous(x) for x in (q, k, v)]
- out, softmax_lse, S_dmask, rng_state = flash_attn_gpu.varlen_fwd(
- q,
- k,
- v,
- None,
- cu_seqlens_q,
- cu_seqlens_k,
- seqused_k,
- leftpad_k,
- block_table,
- alibi_slopes,
- max_seqlen_q,
- max_seqlen_k,
- dropout_p,
- softmax_scale,
- False,
- causal,
- window_size_left,
- window_size_right,
- softcap,
- return_softmax,
- None,
- )
- # if out.isnan().any() or softmax_lse.isnan().any():
- # breakpoint()
- return out, softmax_lse, S_dmask, rng_state
- @_torch_register_fake_wrapper("flash_attn::_flash_attn_varlen_forward")
- def _flash_attn_varlen_forward_fake(
- q: torch.Tensor,
- k: torch.Tensor,
- v: torch.Tensor,
- cu_seqlens_q: torch.Tensor,
- cu_seqlens_k: torch.Tensor,
- max_seqlen_q: int,
- max_seqlen_k: int,
- dropout_p: float,
- softmax_scale: float,
- causal: bool,
- window_size_left: int = -1,
- window_size_right: int = -1,
- softcap: float = 0.0,
- alibi_slopes: Optional[torch.Tensor] = None,
- return_softmax: bool = False,
- block_table: Optional[torch.Tensor] = None,
- leftpad_k: Optional[torch.Tensor] = None,
- seqused_k: Optional[torch.Tensor] = None,
- ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
- q, k, v = [maybe_contiguous(x) for x in (q, k, v)]
- paged_kv = block_table is not None
- batch_size = cu_seqlens_q.numel() - 1
- total_q, num_heads, _ = q.shape
-
- out = torch.empty_like(q)
- softmax_lse = torch.empty((num_heads, total_q), dtype=torch.float32, device=q.device, layout=q.layout)
- p = torch.empty((0,), dtype=q.dtype, device=q.device, layout=q.layout)
- seqlen_q_rounded = round_multiple(max_seqlen_q, 128)
- seqlen_k_rounded = round_multiple(max_seqlen_k, 128)
- if return_softmax:
- p = torch.empty((batch_size, num_heads, seqlen_q_rounded, seqlen_k_rounded), dtype=q.dtype, device=q.device, layout=q.layout)
- rng_state = torch.empty((2,), dtype=torch.int64, device=q.device)
- return out, softmax_lse, p, rng_state
- if torch.__version__ >= "2.4.0":
- _wrapped_flash_attn_varlen_forward = torch.ops.flash_attn._flash_attn_varlen_forward
- else:
- _wrapped_flash_attn_varlen_forward = _flash_attn_varlen_forward
- @_torch_custom_op_wrapper("flash_attn::_flash_attn_backward", mutates_args=("dq", "dk", "dv"), device_types="cuda")
- def _flash_attn_backward(
- dout: torch.Tensor,
- q: torch.Tensor,
- k: torch.Tensor,
- v: torch.Tensor,
- out: torch.Tensor,
- softmax_lse: torch.Tensor,
- dq: Optional[torch.Tensor],
- dk: Optional[torch.Tensor],
- dv: Optional[torch.Tensor],
- dropout_p: float,
- softmax_scale: float,
- causal: bool,
- window_size_left: int,
- window_size_right: int,
- softcap: float,
- alibi_slopes: Optional[torch.Tensor],
- deterministic: bool,
- rng_state: Optional[torch.Tensor] = None,
- ) -> torch.Tensor:
- # dq, dk, dv are allocated by us so they should already be contiguous
- dout, q, k, v, out = [maybe_contiguous(x) for x in (dout, q, k, v, out)]
- (
- dq,
- dk,
- dv,
- softmax_d,
- ) = flash_attn_gpu.bwd(
- dout,
- q,
- k,
- v,
- out,
- softmax_lse,
- dq,
- dk,
- dv,
- alibi_slopes,
- dropout_p,
- softmax_scale,
- causal,
- window_size_left,
- window_size_right,
- softcap,
- deterministic,
- None,
- rng_state,
- )
- return softmax_d
- @_torch_register_fake_wrapper("flash_attn::_flash_attn_backward")
- def _flash_attn_backward_fake(
- dout: torch.Tensor,
- q: torch.Tensor,
- k: torch.Tensor,
- v: torch.Tensor,
- out: torch.Tensor,
- softmax_lse: torch.Tensor,
- dq: Optional[torch.Tensor],
- dk: Optional[torch.Tensor],
- dv: Optional[torch.Tensor],
- dropout_p: float,
- softmax_scale: float,
- causal: bool,
- window_size_left: int,
- window_size_right: int,
- softcap: float,
- alibi_slopes: Optional[torch.Tensor],
- deterministic: bool,
- rng_state: Optional[torch.Tensor] = None,
- ) -> torch.Tensor:
- dout, q, k, v, out = [maybe_contiguous(x) for x in (dout, q, k, v, out)]
- if dq is None:
- dq = torch.empty_like(q)
- if dk is None:
- dk = torch.empty_like(k)
- if dv is None:
- dv = torch.empty_like(v)
- batch_size, seqlen_q, num_heads, _ = q.shape
- softmax_d = torch.empty((batch_size, num_heads, round_multiple(seqlen_q, 128)), device=q.device, dtype=torch.float32)
-
- return softmax_d
- if torch.__version__ >= "2.4.0":
- _wrapped_flash_attn_backward = torch.ops.flash_attn._flash_attn_backward
- else:
- _wrapped_flash_attn_backward = _flash_attn_backward
- @_torch_custom_op_wrapper("flash_attn::_flash_attn_varlen_backward", mutates_args=("dq", "dk", "dv"), device_types="cuda")
- def _flash_attn_varlen_backward(
- dout: torch.Tensor,
- q: torch.Tensor,
- k: torch.Tensor,
- v: torch.Tensor,
- out: torch.Tensor,
- softmax_lse: torch.Tensor,
- dq: Optional[torch.Tensor],
- dk: Optional[torch.Tensor],
- dv: Optional[torch.Tensor],
- cu_seqlens_q: torch.Tensor,
- cu_seqlens_k: torch.Tensor,
- max_seqlen_q: int,
- max_seqlen_k: int,
- dropout_p: float,
- softmax_scale: float,
- causal: bool,
- window_size_left: int,
- window_size_right: int,
- softcap: float,
- alibi_slopes: Optional[torch.Tensor],
- deterministic: bool,
- rng_state: Optional[torch.Tensor] = None,
- ) -> torch.Tensor:
- # dq, dk, dv are allocated by us so they should already be contiguous
- dout, q, k, v, out = [maybe_contiguous(x) for x in (dout, q, k, v, out)]
- (
- dq,
- dk,
- dv,
- softmax_d,
- ) = flash_attn_gpu.varlen_bwd(
- dout,
- q,
- k,
- v,
- out,
- softmax_lse,
- dq,
- dk,
- dv,
- cu_seqlens_q,
- cu_seqlens_k,
- alibi_slopes,
- max_seqlen_q,
- max_seqlen_k,
- dropout_p,
- softmax_scale,
- False,
- causal,
- window_size_left,
- window_size_right,
- softcap,
- deterministic,
- None,
- rng_state,
- )
- # if dq.isnan().any() or dk.isnan().any() or dv.isnan().any() or softmax_d.isnan().any():
- # breakpoint()
- return softmax_d
- @_torch_register_fake_wrapper("flash_attn::_flash_attn_varlen_backward")
- def _flash_attn_varlen_backward_fake(
- dout: torch.Tensor,
- q: torch.Tensor,
- k: torch.Tensor,
- v: torch.Tensor,
- out: torch.Tensor,
- softmax_lse: torch.Tensor,
- dq: Optional[torch.Tensor],
- dk: Optional[torch.Tensor],
- dv: Optional[torch.Tensor],
- cu_seqlens_q: torch.Tensor,
- cu_seqlens_k: torch.Tensor,
- max_seqlen_q: int,
- max_seqlen_k: int,
- dropout_p: float,
- softmax_scale: float,
- causal: bool,
- window_size_left: int,
- window_size_right: int,
- softcap: float,
- alibi_slopes: Optional[torch.Tensor],
- deterministic: bool,
- rng_state: Optional[torch.Tensor] = None,
- ) -> torch.Tensor:
- dout, q, k, v, out = [maybe_contiguous(x) for x in (dout, q, k, v, out)]
- batch_size = cu_seqlens_q.numel() - 1
- total_q, num_heads, _ = q.shape
- if dq is None:
- dq = torch.empty_like(q)
- if dk is None:
- dk = torch.empty_like(k)
- if dv is None:
- dv = torch.empty_like(v)
- softmax_d = torch.empty((num_heads, total_q + 128 * batch_size), device=q.device, dtype=torch.float32)
-
- return softmax_d
- if torch.__version__ >= "2.4.0":
- _wrapped_flash_attn_varlen_backward = torch.ops.flash_attn._flash_attn_varlen_backward
- else:
- _wrapped_flash_attn_varlen_backward = _flash_attn_varlen_backward
- class FlashAttnQKVPackedFunc(torch.autograd.Function):
- @staticmethod
- def forward(
- ctx,
- qkv,
- dropout_p,
- softmax_scale,
- causal,
- window_size,
- softcap,
- alibi_slopes,
- deterministic,
- return_softmax,
- ):
- is_grad = torch.is_grad_enabled() and qkv.requires_grad
- if softmax_scale is None:
- softmax_scale = qkv.shape[-1] ** (-0.5)
- q, k, v = qkv[:, :, 0].detach(), qkv[:, :, 1].detach(), qkv[:, :, 2].detach()
- head_size_og = q.size(3)
- if head_size_og % 8 != 0:
- q = torch.nn.functional.pad(q, [0, 8 - head_size_og % 8])
- k = torch.nn.functional.pad(k, [0, 8 - head_size_og % 8])
- v = torch.nn.functional.pad(v, [0, 8 - head_size_og % 8])
- out_padded, softmax_lse, S_dmask, rng_state = _wrapped_flash_attn_forward(
- q,
- k,
- v,
- dropout_p,
- softmax_scale,
- causal=causal,
- window_size_left=window_size[0],
- window_size_right=window_size[1],
- softcap=softcap,
- alibi_slopes=alibi_slopes,
- return_softmax=return_softmax and dropout_p > 0,
- )
- if is_grad:
- ctx.save_for_backward(q, k, v, out_padded, softmax_lse, rng_state)
- ctx.dropout_p = dropout_p
- ctx.softmax_scale = softmax_scale
- ctx.causal = causal
- ctx.window_size = window_size
- ctx.softcap = softcap
- ctx.alibi_slopes = alibi_slopes
- ctx.deterministic = deterministic
- out = out_padded[..., :head_size_og]
- return out if not return_softmax else (out, softmax_lse, S_dmask)
- @staticmethod
- def backward(ctx, dout, *args):
- q, k, v, out, softmax_lse, rng_state = ctx.saved_tensors
- qkv_shape = q.shape[:-2] + (3, *q.shape[-2:])
- dqkv = torch.empty(qkv_shape, dtype=q.dtype, device=q.device)
- head_size_og = dout.size(3)
- dout_padded = dout
- if head_size_og % 8 != 0:
- dout_padded = torch.nn.functional.pad(dout, [0, 8 - head_size_og % 8])
- _wrapped_flash_attn_backward(
- dout_padded,
- q,
- k,
- v,
- out,
- softmax_lse,
- dqkv[:, :, 0],
- dqkv[:, :, 1],
- dqkv[:, :, 2],
- ctx.dropout_p,
- ctx.softmax_scale,
- ctx.causal,
- ctx.window_size[0],
- ctx.window_size[1],
- ctx.softcap,
- ctx.alibi_slopes,
- ctx.deterministic,
- rng_state=rng_state,
- )
- dqkv = dqkv[..., : dout.shape[-1]] # We could have padded the head dimension
- return dqkv, None, None, None, None, None, None, None, None
- class FlashAttnVarlenQKVPackedFunc(torch.autograd.Function):
- @staticmethod
- def forward(
- ctx,
- qkv,
- cu_seqlens,
- max_seqlen,
- dropout_p,
- softmax_scale,
- causal,
- window_size,
- softcap,
- alibi_slopes,
- deterministic,
- return_softmax,
- ):
- is_grad = torch.is_grad_enabled() and qkv.requires_grad
- if softmax_scale is None:
- softmax_scale = qkv.shape[-1] ** (-0.5)
- q, k, v = qkv[:, 0].detach(), qkv[:, 1].detach(), qkv[:, 2].detach()
- head_size_og = q.size(2)
- if head_size_og % 8 != 0:
- q = torch.nn.functional.pad(q, [0, 8 - head_size_og % 8])
- k = torch.nn.functional.pad(k, [0, 8 - head_size_og % 8])
- v = torch.nn.functional.pad(v, [0, 8 - head_size_og % 8])
- out_padded, softmax_lse, S_dmask, rng_state = _wrapped_flash_attn_varlen_forward(
- q,
- k,
- v,
- cu_seqlens,
- cu_seqlens,
- max_seqlen,
- max_seqlen,
- dropout_p,
- softmax_scale,
- causal=causal,
- window_size_left=window_size[0],
- window_size_right=window_size[1],
- softcap=softcap,
- alibi_slopes=alibi_slopes,
- return_softmax=return_softmax and dropout_p > 0,
- block_table=None,
- )
- if is_grad:
- ctx.save_for_backward(q, k, v, out_padded, softmax_lse, cu_seqlens, rng_state)
- ctx.dropout_p = dropout_p
- ctx.max_seqlen = max_seqlen
- ctx.softmax_scale = softmax_scale
- ctx.causal = causal
- ctx.window_size = window_size
- ctx.softcap = softcap
- ctx.alibi_slopes = alibi_slopes
- ctx.deterministic = deterministic
- out = out_padded[..., :head_size_og]
- return out if not return_softmax else (out, softmax_lse, S_dmask)
- @staticmethod
- def backward(ctx, dout, *args):
- q, k, v, out, softmax_lse, cu_seqlens, rng_state = ctx.saved_tensors
- qkv_shape = q.shape[:-2] + (3, *q.shape[-2:])
- dqkv = torch.empty(qkv_shape, dtype=q.dtype, device=q.device)
- head_size_og = dout.size(2)
- dout_padded = dout
- if head_size_og % 8 != 0:
- dout_padded = torch.nn.functional.pad(dout, [0, 8 - head_size_og % 8])
- _wrapped_flash_attn_varlen_backward(
- dout_padded,
- q,
- k,
- v,
- out,
- softmax_lse,
- dqkv[:, 0],
- dqkv[:, 1],
- dqkv[:, 2],
- cu_seqlens,
- cu_seqlens,
- ctx.max_seqlen,
- ctx.max_seqlen,
- ctx.dropout_p,
- ctx.softmax_scale,
- ctx.causal,
- ctx.window_size[0],
- ctx.window_size[1],
- ctx.softcap,
- ctx.alibi_slopes,
- ctx.deterministic,
- rng_state=rng_state,
- )
- dqkv = dqkv[..., : dout.shape[-1]] # We could have padded the head dimension
- return dqkv, None, None, None, None, None, None, None, None, None, None
- class FlashAttnKVPackedFunc(torch.autograd.Function):
- @staticmethod
- def forward(
- ctx,
- q,
- kv,
- dropout_p,
- softmax_scale,
- causal,
- window_size,
- softcap,
- alibi_slopes,
- deterministic,
- return_softmax,
- ):
- is_grad = torch.is_grad_enabled() and any(
- x.requires_grad for x in [q, kv]
- )
- if softmax_scale is None:
- softmax_scale = q.shape[-1] ** (-0.5)
- k, v = kv[:, :, 0].detach(), kv[:, :, 1].detach()
- head_size_og = q.size(3)
- if head_size_og % 8 != 0:
- q = torch.nn.functional.pad(q, [0, 8 - head_size_og % 8])
- k = torch.nn.functional.pad(k, [0, 8 - head_size_og % 8])
- v = torch.nn.functional.pad(v, [0, 8 - head_size_og % 8])
- out_padded, softmax_lse, S_dmask, rng_state = _wrapped_flash_attn_forward(
- q,
- k,
- v,
- dropout_p,
- softmax_scale,
- causal=causal,
- window_size_left=window_size[0],
- window_size_right=window_size[1],
- softcap=softcap,
- alibi_slopes=alibi_slopes,
- return_softmax=return_softmax and dropout_p > 0,
- )
- if is_grad:
- ctx.save_for_backward(q, k, v, out_padded, softmax_lse, rng_state)
- ctx.dropout_p = dropout_p
- ctx.softmax_scale = softmax_scale
- ctx.causal = causal
- ctx.window_size = window_size
- ctx.softcap = softcap
- ctx.alibi_slopes = alibi_slopes
- ctx.deterministic = deterministic
- out = out_padded[..., :head_size_og]
- return out if not return_softmax else (out, softmax_lse, S_dmask)
- @staticmethod
- def backward(ctx, dout, *args):
- q, k, v, out, softmax_lse, rng_state = ctx.saved_tensors
- dq = torch.empty_like(q)
- kv_shape = k.shape[:-2] + (2, *k.shape[-2:])
- dkv = torch.empty(kv_shape, dtype=k.dtype, device=k.device)
- head_size_og = dout.size(3)
- dout_padded = dout
- if head_size_og % 8 != 0:
- dout_padded = torch.nn.functional.pad(dout, [0, 8 - head_size_og % 8])
- _wrapped_flash_attn_backward(
- dout_padded,
- q,
- k,
- v,
- out,
- softmax_lse,
- dq,
- dkv[:, :, 0],
- dkv[:, :, 1],
- ctx.dropout_p,
- ctx.softmax_scale,
- ctx.causal,
- ctx.window_size[0],
- ctx.window_size[1],
- ctx.softcap,
- ctx.alibi_slopes,
- ctx.deterministic,
- rng_state=rng_state,
- )
- dq = dq[..., : dout.shape[-1]] # We could have padded the head dimension
- dkv = dkv[..., : dout.shape[-1]]
- return dq, dkv, None, None, None, None, None, None, None, None
- class FlashAttnVarlenKVPackedFunc(torch.autograd.Function):
- @staticmethod
- def forward(
- ctx,
- q,
- kv,
- cu_seqlens_q,
- cu_seqlens_k,
- max_seqlen_q,
- max_seqlen_k,
- dropout_p,
- softmax_scale,
- causal,
- window_size,
- softcap,
- alibi_slopes,
- deterministic,
- return_softmax,
- ):
- is_grad = torch.is_grad_enabled() and any(
- x.requires_grad for x in [q, kv]
- )
- if softmax_scale is None:
- softmax_scale = q.shape[-1] ** (-0.5)
- k, v = kv[:, 0].detach(), kv[:, 1].detach()
- head_size_og = q.size(2)
- if head_size_og % 8 != 0:
- q = torch.nn.functional.pad(q, [0, 8 - head_size_og % 8])
- k = torch.nn.functional.pad(k, [0, 8 - head_size_og % 8])
- v = torch.nn.functional.pad(v, [0, 8 - head_size_og % 8])
- out_padded, softmax_lse, S_dmask, rng_state = _wrapped_flash_attn_varlen_forward(
- q,
- k,
- v,
- cu_seqlens_q,
- cu_seqlens_k,
- max_seqlen_q,
- max_seqlen_k,
- dropout_p,
- softmax_scale,
- causal=causal,
- window_size_left=window_size[0],
- window_size_right=window_size[1],
- softcap=softcap,
- alibi_slopes=alibi_slopes,
- return_softmax=return_softmax and dropout_p > 0,
- block_table=None,
- )
- if is_grad:
- ctx.save_for_backward(
- q, k, v, out_padded, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state
- )
- ctx.dropout_p = dropout_p
- ctx.max_seqlen_q = max_seqlen_q
- ctx.max_seqlen_k = max_seqlen_k
- ctx.softmax_scale = softmax_scale
- ctx.causal = causal
- ctx.window_size = window_size
- ctx.softcap = softcap
- ctx.alibi_slopes = alibi_slopes
- ctx.deterministic = deterministic
- out = out_padded[..., :head_size_og]
- return out if not return_softmax else (out, softmax_lse, S_dmask)
- @staticmethod
- def backward(ctx, dout, *args):
- q, k, v, out, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state = ctx.saved_tensors
- dq = torch.empty_like(q)
- kv_shape = k.shape[:-2] + (2, *k.shape[-2:])
- dkv = torch.empty(kv_shape, dtype=k.dtype, device=k.device)
- head_size_og = dout.size(2)
- dout_padded = dout
- if head_size_og % 8 != 0:
- dout_padded = torch.nn.functional.pad(dout, [0, 8 - head_size_og % 8])
- _wrapped_flash_attn_varlen_backward(
- dout_padded,
- q,
- k,
- v,
- out,
- softmax_lse,
- dq,
- dkv[:, 0],
- dkv[:, 1],
- cu_seqlens_q,
- cu_seqlens_k,
- ctx.max_seqlen_q,
- ctx.max_seqlen_k,
- ctx.dropout_p,
- ctx.softmax_scale,
- ctx.causal,
- ctx.window_size[0],
- ctx.window_size[1],
- ctx.softcap,
- ctx.alibi_slopes,
- ctx.deterministic,
- rng_state=rng_state,
- )
- dq = dq[..., : dout.shape[-1]] # We could have padded the head dimension
- dkv = dkv[..., : dout.shape[-1]]
- return dq, dkv, None, None, None, None, None, None, None, None, None, None, None, None
- class FlashAttnFunc(torch.autograd.Function):
- @staticmethod
- def forward(
- ctx,
- q,
- k,
- v,
- dropout_p,
- softmax_scale,
- causal,
- window_size,
- softcap,
- alibi_slopes,
- deterministic,
- return_softmax,
- ):
- is_grad = torch.is_grad_enabled() and any(
- x.requires_grad for x in [q, k, v]
- )
- if softmax_scale is None:
- softmax_scale = q.shape[-1] ** (-0.5)
- head_size_og = q.size(3)
- if head_size_og % 8 != 0:
- q = torch.nn.functional.pad(q, [0, 8 - head_size_og % 8])
- k = torch.nn.functional.pad(k, [0, 8 - head_size_og % 8])
- v = torch.nn.functional.pad(v, [0, 8 - head_size_og % 8])
- out_padded, softmax_lse, S_dmask, rng_state = _wrapped_flash_attn_forward(
- q,
- k,
- v,
- dropout_p,
- softmax_scale,
- causal=causal,
- window_size_left=window_size[0],
- window_size_right=window_size[1],
- softcap=softcap,
- alibi_slopes=alibi_slopes,
- return_softmax=return_softmax and dropout_p > 0,
- )
- if is_grad:
- ctx.save_for_backward(q, k, v, out_padded, softmax_lse, rng_state)
- ctx.dropout_p = dropout_p
- ctx.softmax_scale = softmax_scale
- ctx.causal = causal
- ctx.window_size = window_size
- ctx.softcap = softcap
- ctx.alibi_slopes = alibi_slopes
- ctx.deterministic = deterministic
- out = out_padded[..., :head_size_og]
- return out if not return_softmax else (out, softmax_lse, S_dmask)
- @staticmethod
- def backward(ctx, dout, *args):
- q, k, v, out, softmax_lse, rng_state = ctx.saved_tensors
- dq, dk, dv = torch.empty_like(q), torch.empty_like(k), torch.empty_like(v)
- head_size_og = dout.size(3)
- dout_padded = dout
- if head_size_og % 8 != 0:
- dout_padded = torch.nn.functional.pad(dout, [0, 8 - head_size_og % 8])
- _wrapped_flash_attn_backward(
- dout_padded,
- q,
- k,
- v,
- out,
- softmax_lse,
- dq,
- dk,
- dv,
- ctx.dropout_p,
- ctx.softmax_scale,
- ctx.causal,
- ctx.window_size[0],
- ctx.window_size[1],
- ctx.softcap,
- ctx.alibi_slopes,
- ctx.deterministic,
- rng_state=rng_state,
- )
- dq = dq[..., : dout.shape[-1]] # We could have padded the head dimension
- dk = dk[..., : dout.shape[-1]]
- dv = dv[..., : dout.shape[-1]]
- return dq, dk, dv, None, None, None, None, None, None, None, None
- class FlashAttnVarlenFunc(torch.autograd.Function):
- @staticmethod
- def forward(
- ctx,
- q,
- k,
- v,
- cu_seqlens_q,
- cu_seqlens_k,
- max_seqlen_q,
- max_seqlen_k,
- dropout_p,
- softmax_scale,
- causal,
- window_size,
- softcap,
- alibi_slopes,
- deterministic,
- return_softmax,
- block_table,
- ):
- is_grad = torch.is_grad_enabled() and any(
- x.requires_grad for x in [q, k, v]
- )
- if softmax_scale is None:
- softmax_scale = q.shape[-1] ** (-0.5)
- head_size_og = q.size(2)
- if head_size_og % 8 != 0:
- q = torch.nn.functional.pad(q, [0, 8 - head_size_og % 8])
- k = torch.nn.functional.pad(k, [0, 8 - head_size_og % 8])
- v = torch.nn.functional.pad(v, [0, 8 - head_size_og % 8])
- out_padded, softmax_lse, S_dmask, rng_state = _wrapped_flash_attn_varlen_forward(
- q,
- k,
- v,
- cu_seqlens_q,
- cu_seqlens_k,
- max_seqlen_q,
- max_seqlen_k,
- dropout_p,
- softmax_scale,
- causal=causal,
- window_size_left=window_size[0],
- window_size_right=window_size[1],
- softcap=softcap,
- alibi_slopes=alibi_slopes,
- return_softmax=return_softmax and dropout_p > 0,
- block_table=block_table,
- )
- if is_grad:
- ctx.save_for_backward(
- q, k, v, out_padded, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state
- )
- ctx.dropout_p = dropout_p
- ctx.max_seqlen_q = max_seqlen_q
- ctx.max_seqlen_k = max_seqlen_k
- ctx.softmax_scale = softmax_scale
- ctx.causal = causal
- ctx.window_size = window_size
- ctx.softcap = softcap
- ctx.alibi_slopes = alibi_slopes
- ctx.deterministic = deterministic
- out = out_padded[..., :head_size_og]
- return out if not return_softmax else (out, softmax_lse, S_dmask)
- @staticmethod
- def backward(ctx, dout, *args):
- q, k, v, out, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state = ctx.saved_tensors
- dq, dk, dv = torch.empty_like(q), torch.empty_like(k), torch.empty_like(v)
- head_size_og = dout.size(2)
- dout_padded = dout
- if head_size_og % 8 != 0:
- dout_padded = torch.nn.functional.pad(dout, [0, 8 - head_size_og % 8])
- _wrapped_flash_attn_varlen_backward(
- dout_padded,
- q,
- k,
- v,
- out,
- softmax_lse,
- dq,
- dk,
- dv,
- cu_seqlens_q,
- cu_seqlens_k,
- ctx.max_seqlen_q,
- ctx.max_seqlen_k,
- ctx.dropout_p,
- ctx.softmax_scale,
- ctx.causal,
- ctx.window_size[0],
- ctx.window_size[1],
- ctx.softcap,
- ctx.alibi_slopes,
- ctx.deterministic,
- rng_state=rng_state,
- )
- dq = dq[..., : dout.shape[-1]] # We could have padded the head dimension
- dk = dk[..., : dout.shape[-1]]
- dv = dv[..., : dout.shape[-1]]
- return dq, dk, dv, None, None, None, None, None, None, None, None, None, None, None, None, None
- def flash_attn_qkvpacked_func(
- qkv,
- dropout_p=0.0,
- softmax_scale=None,
- causal=False,
- window_size=(-1, -1), # -1 means infinite context window
- softcap=0.0, # 0.0 means deactivated
- alibi_slopes=None,
- deterministic=False,
- return_attn_probs=False,
- ):
- """dropout_p should be set to 0.0 during evaluation
- If Q, K, V are already stacked into 1 tensor, this function will be faster than
- calling flash_attn_func on Q, K, V since the backward pass avoids explicit concatenation
- of the gradients of Q, K, V.
- For multi-query and grouped-query attention (MQA/GQA), please see
- flash_attn_kvpacked_func and flash_attn_func.
- If window_size != (-1, -1), implements sliding window local attention. Query at position i
- will only attend to keys between [i - window_size[0], i + window_size[1]] inclusive.
- Arguments:
- qkv: (batch_size, seqlen, 3, nheads, headdim)
- dropout_p: float. Dropout probability.
- softmax_scale: float. The scaling of QK^T before applying softmax.
- Default to 1 / sqrt(headdim).
- causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
- window_size: (left, right). If not (-1, -1), implements sliding window local attention.
- softcap: float. Anything > 0 activates softcapping attention.
- alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of (-alibi_slope * |i - j|) is added to
- the attention score of query i and key j.
- deterministic: bool. Whether to use the deterministic implementation of the backward pass,
- which is slightly slower and uses more memory. The forward pass is always deterministic.
- return_attn_probs: bool. Whether to return the attention probabilities. This option is for
- testing only. The returned probabilities are not guaranteed to be correct
- (they might not have the right scaling).
- Return:
- out: (batch_size, seqlen, nheads, headdim).
- softmax_lse [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen). The
- logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
- normalization factor).
- S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
- The output of softmax (possibly with different scaling). It also encodes the dropout
- pattern (negative means that location was dropped, nonnegative means it was kept).
- """
- return FlashAttnQKVPackedFunc.apply(
- qkv,
- dropout_p,
- softmax_scale,
- causal,
- window_size,
- softcap,
- alibi_slopes,
- deterministic,
- return_attn_probs,
- )
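- # Usage sketch (illustrative, not part of the original file): packed-QKV self-attention
- # on CUDA in fp16; shapes follow the docstring above.
- #
- #     qkv = torch.randn(2, 1024, 3, 8, 64, device="cuda", dtype=torch.float16, requires_grad=True)
- #     out = flash_attn_qkvpacked_func(qkv, dropout_p=0.0, causal=True)  # (2, 1024, 8, 64)
- #     out.sum().backward()  # gradients land in one dqkv buffer, no dq/dk/dv concatenation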
- def flash_attn_kvpacked_func(
- q,
- kv,
- dropout_p=0.0,
- softmax_scale=None,
- causal=False,
- window_size=(-1, -1), # -1 means infinite context window
- softcap=0.0, # 0.0 means deactivated
- alibi_slopes=None,
- deterministic=False,
- return_attn_probs=False,
- ):
- """dropout_p should be set to 0.0 during evaluation
- If K, V are already stacked into 1 tensor, this function will be faster than
- calling flash_attn_func on Q, K, V since the backward pass avoids explicit concatenation
- of the gradients of K, V.
- Supports multi-query and grouped-query attention (MQA/GQA) by passing in KV with fewer heads
- than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
- For example, if Q has 6 heads and K, V have 2 heads, heads 0, 1, 2 of Q will attend to head
- 0 of K, V, and heads 3, 4, 5 of Q will attend to head 1 of K, V.
- If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix.
- For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is:
- 1 1 1 1 0
- 1 1 1 1 1
- If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
- 0 0
- 0 0
- 0 0
- 1 0
- 1 1
- If a row of the mask is all zero, the corresponding row of the output will be zero.
- If window_size != (-1, -1), implements sliding window local attention. Query at position i
- will only attend to keys between
- [i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.
- Arguments:
- q: (batch_size, seqlen, nheads, headdim)
- kv: (batch_size, seqlen, 2, nheads_k, headdim)
- dropout_p: float. Dropout probability.
- softmax_scale: float. The scaling of QK^T before applying softmax.
- Default to 1 / sqrt(headdim).
- causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
- window_size: (left, right). If not (-1, -1), implements sliding window local attention.
- softcap: float. Anything > 0 activates softcapping attention.
- alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of
- (-alibi_slope * |i + seqlen_k - seqlen_q - j|)
- is added to the attention score of query i and key j.
- deterministic: bool. Whether to use the deterministic implementation of the backward pass,
- which is slightly slower and uses more memory. The forward pass is always deterministic.
- return_attn_probs: bool. Whether to return the attention probabilities. This option is for
- testing only. The returned probabilities are not guaranteed to be correct
- (they might not have the right scaling).
- Return:
- out: (batch_size, seqlen, nheads, headdim).
- softmax_lse [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen). The
- logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
- normalization factor).
- S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
- The output of softmax (possibly with different scaling). It also encodes the dropout
- pattern (negative means that location was dropped, nonnegative means it was kept).
- """
- return FlashAttnKVPackedFunc.apply(
- q,
- kv,
- dropout_p,
- softmax_scale,
- causal,
- window_size,
- softcap,
- alibi_slopes,
- deterministic,
- return_attn_probs,
- )
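- # Usage sketch (illustrative): grouped-query attention with packed KV. Q has 8 heads and
- # KV has 2, so each KV head serves 4 query heads, as described in the docstring above.
- #
- #     q = torch.randn(2, 1024, 8, 64, device="cuda", dtype=torch.float16)
- #     kv = torch.randn(2, 1024, 2, 2, 64, device="cuda", dtype=torch.float16)
- #     out = flash_attn_kvpacked_func(q, kv, causal=True)  # (2, 1024, 8, 64)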
- def flash_attn_func(
- q,
- k,
- v,
- dropout_p=0.0,
- softmax_scale=None,
- causal=False,
- window_size=(-1, -1), # -1 means infinite context window
- softcap=0.0, # 0.0 means deactivated
- alibi_slopes=None,
- deterministic=False,
- return_attn_probs=False,
- ):
- """dropout_p should be set to 0.0 during evaluation
- Supports multi-query and grouped-query attention (MQA/GQA) by passing in KV with fewer heads
- than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
- For example, if Q has 6 heads and K, V have 2 heads, heads 0, 1, 2 of Q will attend to head
- 0 of K, V, and heads 3, 4, 5 of Q will attend to head 1 of K, V.
- If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix.
- For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is:
- 1 1 1 1 0
- 1 1 1 1 1
- If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
- 0 0
- 0 0
- 0 0
- 1 0
- 1 1
- If a row of the mask is all zero, the corresponding row of the output will be zero.
- If window_size != (-1, -1), implements sliding window local attention. Query at position i
- will only attend to keys between
- [i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.
- Arguments:
- q: (batch_size, seqlen, nheads, headdim)
- k: (batch_size, seqlen, nheads_k, headdim)
- v: (batch_size, seqlen, nheads_k, headdim)
- dropout_p: float. Dropout probability.
- softmax_scale: float. The scaling of QK^T before applying softmax.
- Default to 1 / sqrt(headdim).
- causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
- window_size: (left, right). If not (-1, -1), implements sliding window local attention.
- softcap: float. Anything > 0 activates softcapping attention.
- alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of
- (-alibi_slope * |i + seqlen_k - seqlen_q - j|)
- is added to the attention score of query i and key j.
- deterministic: bool. Whether to use the deterministic implementation of the backward pass,
- which is slightly slower and uses more memory. The forward pass is always deterministic.
- return_attn_probs: bool. Whether to return the attention probabilities. This option is for
- testing only. The returned probabilities are not guaranteed to be correct
- (they might not have the right scaling).
- Return:
- out: (batch_size, seqlen, nheads, headdim).
- softmax_lse [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen). The
- logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
- normalization factor).
- S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
- The output of softmax (possibly with different scaling). It also encodes the dropout
- pattern (negative means that location was dropped, nonnegative means it was kept).
- """
- return FlashAttnFunc.apply(
- q,
- k,
- v,
- dropout_p,
- softmax_scale,
- causal,
- window_size,
- softcap,
- alibi_slopes,
- deterministic,
- return_attn_probs,
- )
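- # Usage sketch (illustrative): causal attention with separate Q, K, V and a sliding
- # window of 256 keys to the left (shapes per the docstring above).
- #
- #     q = k = v = torch.randn(2, 2048, 16, 128, device="cuda", dtype=torch.bfloat16)
- #     out = flash_attn_func(q, k, v, causal=True, window_size=(256, 0))  # (2, 2048, 16, 128)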
- def flash_attn_varlen_qkvpacked_func(
- qkv,
- cu_seqlens,
- max_seqlen,
- dropout_p=0.0,
- softmax_scale=None,
- causal=False,
- window_size=(-1, -1), # -1 means infinite context window
- softcap=0.0, # 0.0 means deactivated
- alibi_slopes=None,
- deterministic=False,
- return_attn_probs=False,
- ):
- """dropout_p should be set to 0.0 during evaluation
- If Q, K, V are already stacked into 1 tensor, this function will be faster than
- calling flash_attn_varlen_func on Q, K, V since the backward pass avoids explicit concatenation
- of the gradients of Q, K, V.
- For multi-query and grouped-query attention (MQA/GQA), please see
- flash_attn_varlen_kvpacked_func and flash_attn_varlen_func.
- If window_size != (-1, -1), implements sliding window local attention. Query at position i
- will only attend to keys between [i - window_size[0], i + window_size[1]] inclusive.
- Arguments:
- qkv: (total, 3, nheads, headdim), where total = total number of tokens in the batch.
- cu_seqlens: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
- of the sequences in the batch, used to index into qkv.
- max_seqlen: int. Maximum sequence length in the batch.
- dropout_p: float. Dropout probability.
- softmax_scale: float. The scaling of QK^T before applying softmax.
- Default to 1 / sqrt(headdim).
- causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
- window_size: (left, right). If not (-1, -1), implements sliding window local attention.
- softcap: float. Anything > 0 activates softcapping attention.
- alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of (-alibi_slope * |i - j|)
- is added to the attention score of query i and key j.
- deterministic: bool. Whether to use the deterministic implementation of the backward pass,
- which is slightly slower and uses more memory. The forward pass is always deterministic.
- return_attn_probs: bool. Whether to return the attention probabilities. This option is for
- testing only. The returned probabilities are not guaranteed to be correct
- (they might not have the right scaling).
- Return:
- out: (total, nheads, headdim).
- softmax_lse [optional, if return_attn_probs=True]: (nheads, total_q_seqlen). The
- logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
- normalization factor).
- S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
- The output of softmax (possibly with different scaling). It also encodes the dropout
- pattern (negative means that location was dropped, nonnegative means it was kept).
- """
- return FlashAttnVarlenQKVPackedFunc.apply(
- qkv,
- cu_seqlens,
- max_seqlen,
- dropout_p,
- softmax_scale,
- causal,
- window_size,
- softcap,
- alibi_slopes,
- deterministic,
- return_attn_probs,
- )
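- # Usage sketch (illustrative): two sequences of lengths 300 and 700 packed into one
- # "total" dimension; cu_seqlens marks the boundaries, as the docstring describes.
- #
- #     seqlens = torch.tensor([300, 700], device="cuda")
- #     cu_seqlens = torch.nn.functional.pad(seqlens.cumsum(0, dtype=torch.int32), (1, 0))  # [0, 300, 1000]
- #     qkv = torch.randn(1000, 3, 8, 64, device="cuda", dtype=torch.float16)
- #     out = flash_attn_varlen_qkvpacked_func(qkv, cu_seqlens, max_seqlen=700, causal=True)  # (1000, 8, 64)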
- def flash_attn_varlen_kvpacked_func(
- q,
- kv,
- cu_seqlens_q,
- cu_seqlens_k,
- max_seqlen_q,
- max_seqlen_k,
- dropout_p=0.0,
- softmax_scale=None,
- causal=False,
- window_size=(-1, -1), # -1 means infinite context window
- softcap=0.0, # 0.0 means deactivated
- alibi_slopes=None,
- deterministic=False,
- return_attn_probs=False,
- ):
- """dropout_p should be set to 0.0 during evaluation
- If K, V are already stacked into 1 tensor, this function will be faster than
- calling flash_attn_varlen_func on Q, K, V since the backward pass avoids explicit concatenation
- of the gradients of K, V.
- Supports multi-query and grouped-query attention (MQA/GQA) by passing in KV with fewer heads
- than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
- For example, if Q has 6 heads and K, V have 2 heads, heads 0, 1, 2 of Q will attend to head
- 0 of K, V, and heads 3, 4, 5 of Q will attend to head 1 of K, V.
- If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix.
- For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is:
- 1 1 1 1 0
- 1 1 1 1 1
- If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
- 0 0
- 0 0
- 0 0
- 1 0
- 1 1
- If a row of the mask is all zero, the corresponding row of the output will be zero.
- If window_size != (-1, -1), implements sliding window local attention. Query at position i
- will only attend to keys between
- [i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.
- Arguments:
- q: (total_q, nheads, headdim), where total_q = total number of query tokens in the batch.
- kv: (total_k, 2, nheads_k, headdim), where total_k = total number of key tokens in the batch.
- cu_seqlens_q: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
- of the sequences in the batch, used to index into q.
- cu_seqlens_k: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
- of the sequences in the batch, used to index into kv.
- max_seqlen_q: int. Maximum query sequence length in the batch.
- max_seqlen_k: int. Maximum key sequence length in the batch.
- dropout_p: float. Dropout probability.
- softmax_scale: float. The scaling of QK^T before applying softmax.
- Default to 1 / sqrt(headdim).
- causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
- window_size: (left, right). If not (-1, -1), implements sliding window local attention.
- softcap: float. Anything > 0 activates softcapping attention.
- alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of
- (-alibi_slope * |i + seqlen_k - seqlen_q - j|)
- is added to the attention score of query i and key j.
- deterministic: bool. Whether to use the deterministic implementation of the backward pass,
- which is slightly slower and uses more memory. The forward pass is always deterministic.
- return_attn_probs: bool. Whether to return the attention probabilities. This option is for
- testing only. The returned probabilities are not guaranteed to be correct
- (they might not have the right scaling).
- Return:
- out: (total_q, nheads, headdim).
- softmax_lse [optional, if return_attn_probs=True]: (nheads, total_q_seqlen). The
- logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
- normalization factor).
- S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
- The output of softmax (possibly with different scaling). It also encodes the dropout
- pattern (negative means that location was dropped, nonnegative means it was kept).
- """
- return FlashAttnVarlenKVPackedFunc.apply(
- q,
- kv,
- cu_seqlens_q,
- cu_seqlens_k,
- max_seqlen_q,
- max_seqlen_k,
- dropout_p,
- softmax_scale,
- causal,
- window_size,
- softcap,
- alibi_slopes,
- deterministic,
- return_attn_probs,
- )
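- # Usage sketch (illustrative): variable-length cross-attention where queries and keys
- # come from different packed batches (e.g. a decoder attending over encoder states).
- #
- #     cu_q = torch.tensor([0, 10, 30], dtype=torch.int32, device="cuda")    # 2 seqs: 10 + 20 queries
- #     cu_k = torch.tensor([0, 100, 250], dtype=torch.int32, device="cuda")  # 100 + 150 keys
- #     q = torch.randn(30, 8, 64, device="cuda", dtype=torch.float16)
- #     kv = torch.randn(250, 2, 8, 64, device="cuda", dtype=torch.float16)
- #     out = flash_attn_varlen_kvpacked_func(q, kv, cu_q, cu_k, max_seqlen_q=20, max_seqlen_k=150)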
- def flash_attn_varlen_func(
- q,
- k,
- v,
- cu_seqlens_q,
- cu_seqlens_k,
- max_seqlen_q,
- max_seqlen_k,
- dropout_p=0.0,
- softmax_scale=None,
- causal=False,
- window_size=(-1, -1), # -1 means infinite context window
- softcap=0.0, # 0.0 means deactivated
- alibi_slopes=None,
- deterministic=False,
- return_attn_probs=False,
- block_table=None,
- ):
- """dropout_p should be set to 0.0 during evaluation
- Supports multi-query and grouped-query attention (MQA/GQA) by passing in K, V with fewer heads
- than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
- For example, if Q has 6 heads and K, V have 2 heads, heads 0, 1, 2 of Q will attend to head
- 0 of K, V, and heads 3, 4, 5 of Q will attend to head 1 of K, V.
- If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix.
- For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is:
- 1 1 1 1 0
- 1 1 1 1 1
- If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
- 0 0
- 0 0
- 0 0
- 1 0
- 1 1
- If a row of the mask is all zero, the corresponding row of the output will be zero.
- If window_size != (-1, -1), implements sliding window local attention. Query at position i
- will only attend to keys between
- [i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.
- Arguments:
- q: (total_q, nheads, headdim), where total_q = total number of query tokens in the batch.
- k: (total_k, nheads_k, headdim), where total_k = total number of key tokens in the batch.
- v: (total_k, nheads_k, headdim), where total_k = total number of key tokens in the batch.
- cu_seqlens_q: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
- of the sequences in the batch, used to index into q.
- cu_seqlens_k: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
- of the sequences in the batch, used to index into kv.
- max_seqlen_q: int. Maximum query sequence length in the batch.
- max_seqlen_k: int. Maximum key sequence length in the batch.
- dropout_p: float. Dropout probability.
- softmax_scale: float. The scaling of QK^T before applying softmax.
- Default to 1 / sqrt(headdim).
- causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
- window_size: (left, right). If not (-1, -1), implements sliding window local attention.
- softcap: float. Anything > 0 activates softcapping attention.
- alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of
- (-alibi_slope * |i + seqlen_k - seqlen_q - j|)
- is added to the attention score of query i and key j.
- deterministic: bool. Whether to use the deterministic implementation of the backward pass,
- which is slightly slower and uses more memory. The forward pass is always deterministic.
- return_attn_probs: bool. Whether to return the attention probabilities. This option is for
- testing only. The returned probabilities are not guaranteed to be correct
- (they might not have the right scaling).
- Return:
- out: (total_q, nheads, headdim).
- softmax_lse [optional, if return_attn_probs=True]: (nheads, total_q_seqlen). The
- logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
- normalization factor).
- S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
- The output of softmax (possibly with different scaling). It also encodes the dropout
- pattern (negative means that location was dropped, nonnegative means it was kept).
- """
- return FlashAttnVarlenFunc.apply(
- q,
- k,
- v,
- cu_seqlens_q,
- cu_seqlens_k,
- max_seqlen_q,
- max_seqlen_k,
- dropout_p,
- softmax_scale,
- causal,
- window_size,
- softcap,
- alibi_slopes,
- deterministic,
- return_attn_probs,
- block_table,
- )
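- # Usage sketch (illustrative): unpadded attention over a packed batch, the usual path
- # for training on variable-length inputs; here with GQA (2 KV heads for 8 query heads).
- #
- #     cu = torch.tensor([0, 512, 1024], dtype=torch.int32, device="cuda")
- #     q = torch.randn(1024, 8, 64, device="cuda", dtype=torch.float16)
- #     k = torch.randn(1024, 2, 64, device="cuda", dtype=torch.float16)
- #     v = torch.randn(1024, 2, 64, device="cuda", dtype=torch.float16)
- #     out = flash_attn_varlen_func(q, k, v, cu, cu, 512, 512, causal=True)  # (1024, 8, 64)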
- def flash_attn_with_kvcache(
- q,
- k_cache,
- v_cache,
- k=None,
- v=None,
- rotary_cos=None,
- rotary_sin=None,
- cache_seqlens: Optional[Union[int, torch.Tensor]] = None,
- cache_batch_idx: Optional[torch.Tensor] = None,
- cache_leftpad: Optional[torch.Tensor] = None,
- block_table: Optional[torch.Tensor] = None,
- softmax_scale=None,
- causal=False,
- window_size=(-1, -1), # -1 means infinite context window
- softcap=0.0, # 0.0 means deactivated
- rotary_interleaved=True,
- alibi_slopes=None,
- num_splits=0,
- return_softmax_lse=False,
- ):
- """
- If k and v are not None, k_cache and v_cache will be updated *inplace* with the new values from
- k and v. This is useful for incremental decoding: you can pass in the cached keys/values from
- the previous step, and update them with the new keys/values from the current step, and do
- attention with the updated cache, all in 1 kernel.
- If you pass in k / v, you must make sure that the cache is large enough to hold the new values.
- For example, the KV cache could be pre-allocated with the max sequence length, and you can use
- cache_seqlens to keep track of the current sequence lengths of each sequence in the batch.
- Also apply rotary embedding if rotary_cos and rotary_sin are passed in. The key @k will be
- rotated by rotary_cos and rotary_sin at indices cache_seqlens, cache_seqlens + 1, etc.
- If causal or local (i.e., window_size != (-1, -1)), the query @q will be rotated by rotary_cos
- and rotary_sin at indices cache_seqlens, cache_seqlens + 1, etc.
- If not causal and not local, the query @q will be rotated by rotary_cos and rotary_sin at
- indices cache_seqlens only (i.e. we consider all tokens in @q to be at position cache_seqlens).
- See tests/test_flash_attn.py::test_flash_attn_kvcache for examples of how to use this function.
- Supports multi-query and grouped-query attention (MQA/GQA) by passing in KV with fewer heads
- than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
- For example, if Q has 6 heads and K, V have 2 heads, heads 0, 1, 2 of Q will attend to head
- 0 of K, V, and heads 3, 4, 5 of Q will attend to head 1 of K, V.
- If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix.
- For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is:
- 1 1 1 1 0
- 1 1 1 1 1
- If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
- 0 0
- 0 0
- 0 0
- 1 0
- 1 1
- If a row of the mask is all zero, the corresponding row of the output will be zero.
- If window_size != (-1, -1), implements sliding window local attention. Query at position i
- will only attend to keys between
- [i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.
- Note: Does not support backward pass.
- Arguments:
- q: (batch_size, seqlen, nheads, headdim)
- k_cache: (batch_size_cache, seqlen_cache, nheads_k, headdim) if there's no block_table,
- or (num_blocks, page_block_size, nheads_k, headdim) if there's a block_table (i.e. paged KV cache)
- page_block_size must be a multiple of 256.
- v_cache: (batch_size_cache, seqlen_cache, nheads_k, headdim) if there's no block_table,
- or (num_blocks, page_block_size, nheads_k, headdim) if there's a block_table (i.e. paged KV cache)
- k [optional]: (batch_size, seqlen_new, nheads_k, headdim). If not None, we concatenate
- k with k_cache, starting at the indices specified by cache_seqlens.
- v [optional]: (batch_size, seqlen_new, nheads_k, headdim). Similar to k.
- rotary_cos [optional]: (seqlen_ro, rotary_dim / 2). If not None, we apply rotary embedding
- to k and q. Only applicable if k and v are passed in. rotary_dim must be divisible by 16.
- rotary_sin [optional]: (seqlen_ro, rotary_dim / 2). Similar to rotary_cos.
- cache_seqlens: int, or (batch_size,), dtype torch.int32. The sequence lengths of the
- KV cache.
- cache_batch_idx: (batch_size,), dtype torch.int32. The indices used to index into the KV cache.
- If None, we assume that the batch indices are [0, 1, 2, ..., batch_size - 1].
- If the indices are not distinct, and k and v are provided, the values updated in the cache
- might come from any of the duplicate indices.
- cache_leftpad: (batch_size,), dtype torch.int32. The index that the KV cache starts. If None, assume 0.
- block_table [optional]: (batch_size, max_num_blocks_per_seq), dtype torch.int32.
- softmax_scale: float. The scaling of QK^T before applying softmax.
- Default to 1 / sqrt(headdim).
- causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
- window_size: (left, right). If not (-1, -1), implements sliding window local attention.
- softcap: float. Anything > 0 activates softcapping attention.
- rotary_interleaved: bool. Only applicable if rotary_cos and rotary_sin are passed in.
- If True, rotary embedding will combine dimensions 0 & 1, 2 & 3, etc. If False,
- rotary embedding will combine dimensions 0 & rotary_dim / 2, 1 & rotary_dim / 2 + 1
- (i.e. GPT-NeoX style).
- alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of
- (-alibi_slope * |i + seqlen_k - seqlen_q - j|)
- is added to the attention score of query i and key j.
- num_splits: int. If > 1, split the key/value into this many chunks along the sequence.
- If num_splits == 1, we don't split the key/value. If num_splits == 0, we use a heuristic
- to automatically determine the number of splits.
- Don't change this unless you know what you are doing.
- return_softmax_lse: bool. Whether to return the logsumexp of the attention scores.
- Return:
- out: (batch_size, seqlen, nheads, headdim).
- softmax_lse [optional, if return_softmax_lse=True]: (batch_size, nheads, seqlen). The
- logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
- normalization factor).
- """
- assert k_cache.stride(-1) == 1, "k_cache must have contiguous last dimension"
- assert v_cache.stride(-1) == 1, "v_cache must have contiguous last dimension"
- q, k, v = [maybe_contiguous(x) for x in (q, k, v)]
- if softmax_scale is None:
- softmax_scale = q.shape[-1] ** (-0.5)
- if cache_seqlens is not None and isinstance(cache_seqlens, int):
- cache_seqlens = torch.full(
- (k_cache.shape[0],), cache_seqlens, dtype=torch.int32, device=k_cache.device
- )
- cache_seqlens = maybe_contiguous(cache_seqlens)
- cache_batch_idx = maybe_contiguous(cache_batch_idx)
- block_table = maybe_contiguous(block_table)
- out, softmax_lse = flash_attn_gpu.fwd_kvcache(
- q,
- k_cache,
- v_cache,
- k,
- v,
- cache_seqlens,
- rotary_cos,
- rotary_sin,
- cache_batch_idx,
- cache_leftpad,
- block_table,
- alibi_slopes,
- None,
- softmax_scale,
- causal,
- window_size[0],
- window_size[1],
- softcap,
- rotary_interleaved,
- num_splits,
- )
- return (out, softmax_lse) if return_softmax_lse else out
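- # Usage sketch (illustrative): one step of incremental decoding. The cache is
- # pre-allocated to a max length, cache_seqlens records how much of it is valid, and
- # this step's k/v are appended inside the same kernel call, per the docstring above.
- #
- #     k_cache = torch.zeros(2, 4096, 8, 64, device="cuda", dtype=torch.float16)
- #     v_cache = torch.zeros_like(k_cache)
- #     cache_seqlens = torch.tensor([17, 42], dtype=torch.int32, device="cuda")  # tokens already cached
- #     q = torch.randn(2, 1, 8, 64, device="cuda", dtype=torch.float16)  # one new query token per sequence
- #     k = torch.randn(2, 1, 8, 64, device="cuda", dtype=torch.float16)
- #     v = torch.randn(2, 1, 8, 64, device="cuda", dtype=torch.float16)
- #     out = flash_attn_with_kvcache(q, k_cache, v_cache, k=k, v=v,
- #                                   cache_seqlens=cache_seqlens, causal=True)  # (2, 1, 8, 64)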
|