
fix: force v2 for ctxlen larger than 8192 (#100)

* fix: force v2 for ctxlen larger than 8192

* fix linting
AlpinDale, 1 year ago
parent commit f384f3ae60

+ 2 - 0
aphrodite/endpoints/kobold/api_server.py

@@ -230,11 +230,13 @@ async def get_extra_version():
     """ Impersonate KoboldCpp with streaming support """
     return JSONResponse({"result": "KoboldCpp", "version": "1.47"})
 
+
 @app.get("/health")
 async def health() -> Response:
     """Health check route for K8s"""
     return Response(status_code=200)
 
+
 @app.get("/")
 async def get_kobold_lite_ui():
     """Serves a cached copy of the Kobold Lite UI, loading it from disk on

+ 3 - 2
aphrodite/endpoints/ooba/api_server.py

@@ -2,8 +2,7 @@ import argparse
 import json
 from typing import AsyncGenerator
 
-from fastapi import (BackgroundTasks, Header, FastAPI,
-                     HTTPException, Request, Response)
+from fastapi import (BackgroundTasks, Header, FastAPI, HTTPException, Request)
 from fastapi.middleware.cors import CORSMiddleware
 from fastapi.responses import JSONResponse, Response, StreamingResponse
 import uvicorn
@@ -140,11 +139,13 @@ async def get_model_name(x_api_key: str = Header(None)) -> JSONResponse:
     else:
         return JSONResponse(content={"result": "Read Only"}, status_code=500)
 
+
 @app.get("/health")
 async def health() -> Response:
     """Health check route for K8s"""
     return Response(status_code=200)
 
+
 if __name__ == "__main__":
     engine_args = AsyncEngineArgs.from_cli_args(args)
     engine = AsyncAphrodite.from_engine_args(engine_args)
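
The import change above is the substance of the "fix linting" part of this commit: Response was previously imported from both fastapi and fastapi.responses, and the second import rebinds the first name, which flake8 reports as a redefinition (F811). A minimal reproduction, assuming nothing beyond FastAPI itself; both import paths resolve to the same re-exported Starlette class:

    # Importing under distinct aliases shows the two paths bind one class;
    # importing both as plain `Response` (as the old code did) is therefore
    # a pure redefinition, which is what the linter flagged.
    from fastapi import Response as TopLevelResponse
    from fastapi.responses import Response as ResponsesResponse

    assert TopLevelResponse is ResponsesResponse

Keeping the fastapi.responses import (which also supplies JSONResponse and StreamingResponse) and dropping Response from the top-level import resolves the warning without changing behavior.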

+ 1 - 0
aphrodite/endpoints/openai/api_server.py

@@ -144,6 +144,7 @@ async def check_length(
     else:
         return input_ids, None
 
+
 @app.get("/health")
 async def health() -> Response:
     """Health check route for K8s"""

+ 4 - 1
aphrodite/modeling/layers/attention.py

@@ -159,7 +159,10 @@ class PagedAttention(nn.Module):
         # sequences or heads is large, we use V1 since there is enough work
         # to parallelize.
         # TODO: Tune this heuristic.
-        use_v1 = max_num_partitions == 1 or num_seqs * num_heads > 512
+        # For context lengths > 8192, we use v2 kernel to avoid shared memory
+        # shortage.
+        use_v1 = input_metadata.max_context_len <= 8192 and (
+            max_num_partitions == 1 or num_seqs * num_heads > 512)
         if use_v1:
             # Run PagedAttention V1.
             attention_ops.paged_attention_v1(
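
Read outside the diff, the new dispatch rule amounts to the standalone predicate below. This is a sketch: the argument names mirror the locals in PagedAttention.forward, and the 512 threshold is the pre-existing work-saturation cutoff from the original heuristic, not something introduced here.

    def use_paged_attention_v1(max_context_len: int,
                               max_num_partitions: int,
                               num_seqs: int,
                               num_heads: int) -> bool:
        # V1 keeps a whole sequence's attention logits in shared memory,
        # so it is only safe up to 8192 tokens of context; beyond that,
        # fall through to V2, which partitions the sequence.
        if max_context_len > 8192:
            return False
        # Otherwise keep the original heuristic: prefer V1 when there is
        # nothing to partition, or when there are already enough
        # (sequence, head) pairs to keep the GPU busy.
        return max_num_partitions == 1 or num_seqs * num_heads > 512

Because oversized contexts now route to the V2 kernel instead of overflowing V1's shared-memory budget, the preflight check deleted from worker.py below is no longer needed.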

+ 1 - 29
aphrodite/task_handler/worker.py

@@ -13,7 +13,7 @@ from aphrodite.modeling.megatron.parallel_state import (
 from aphrodite.common.sampling_params import SamplingParams
 from aphrodite.common.sequence import SamplerOutput, SequenceData, SequenceGroupMetadata
 from aphrodite.task_handler.cache_engine import CacheEngine
-from aphrodite.common.utils import get_gpu_memory, get_max_shared_memory_bytes
+from aphrodite.common.utils import get_gpu_memory
 
 
 class Worker:
@@ -141,14 +141,6 @@ class Worker:
         self.block_size = cache_config.block_size
         self.sliding_window = cache_config.sliding_window
 
-        if self.sliding_window is None:
-            max_seq_len = self.scheduler_config.max_model_len
-        else:
-            max_seq_len = min(self.scheduler_config.max_model_len,
-                              self.sliding_window)
-
-        _check_if_can_support_max_seq_len(max_seq_len, self.block_size)
-
         self.cache_engine = CacheEngine(self.cache_config, self.model_config,
                                         self.parallel_config)
         self.cache_events = self.cache_engine.events
@@ -385,26 +377,6 @@ def _pad_to_max(x: List[int], max_len: int, pad: int) -> List[int]:
     return x + [pad] * (max_len - len(x))
 
 
-def _check_if_can_support_max_seq_len(max_seq_len: int,
-                                      block_size: int) -> None:
-    # Follows the logic in
-    # attention_kernels.cu::single_query_cached_kv_attention_launcher
-    max_shared_mem = get_max_shared_memory_bytes()
-    float32_bytes = torch.finfo(torch.float).bits // 8
-    padded_max_seq_len = (
-        (max_seq_len + block_size - 1) / block_size) * block_size
-    # padded_max_seq_len + extra buffer
-    required_shared_mem = (padded_max_seq_len + 512) * float32_bytes
-    if padded_max_seq_len * float32_bytes > max_shared_mem:
-        raise RuntimeError(
-            f"Aphrodite cannot currently support max_model_len={max_seq_len} "
-            f"with block_size={block_size} on GPU with compute "
-            f"capability {torch.cuda.get_device_capability()} "
-            f"(required shared memory {required_shared_mem} > "
-            f"available shared memory {max_shared_mem}). "
-            "This will be fixed in a future release.")
-
-
 def _check_if_gpu_supports_dtype(torch_dtype: torch.dtype):
     if torch_dtype == torch.bfloat16:
         compute_capability = torch.cuda.get_device_capability()
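
For intuition about the 8192 cutoff, the arithmetic of the deleted _check_if_can_support_max_seq_len can be replayed by hand. This is a sketch: the 48 KiB per-block shared-memory limit is an assumed figure (common on NVIDIA GPUs; the deleted code queried the real value via get_max_shared_memory_bytes()), and it uses integer division where the original used a float-producing "/".

    def v1_shared_mem_bytes(max_seq_len: int, block_size: int = 16) -> int:
        # One float32 logit per context token, padded up to a block
        # boundary, plus the 512-float extra buffer from the deleted check.
        float32_bytes = 4
        padded = ((max_seq_len + block_size - 1) // block_size) * block_size
        return (padded + 512) * float32_bytes

    ASSUMED_LIMIT = 48 * 1024          # hypothetical 48 KiB per block
    print(v1_shared_mem_bytes(8192))   # 34816 bytes: fits under the limit
    print(v1_shared_mem_bytes(16384))  # 67584 bytes: over the limit

At 8192 tokens the V1 kernel needs roughly 34 KiB, which fits; doubling the context overshoots the budget, which is exactly where the new heuristic in attention.py switches to the V2 kernel, so the runtime error this function used to raise can simply go away.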