import argparse
import asyncio
import importlib
import inspect
import json
import os
from contextlib import asynccontextmanager
from http import HTTPStatus
from typing import AsyncGenerator, List, Optional, Tuple

import fastapi
import uvicorn
from fastapi import APIRouter, Header, Request
from fastapi.exceptions import RequestValidationError
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import (HTMLResponse, JSONResponse, Response,
                               StreamingResponse)
from loguru import logger
from prometheus_client import make_asgi_app

import aphrodite
import aphrodite.endpoints.openai.embeddings as OAIembeddings
from aphrodite.common.logger import UVICORN_LOG_CONFIG
from aphrodite.common.outputs import RequestOutput
from aphrodite.common.sampling_params import _SAMPLING_EPS, SamplingParams
from aphrodite.common.utils import random_uuid
from aphrodite.endpoints.openai.protocol import (ChatCompletionRequest,
                                                 CompletionRequest,
                                                 EmbeddingsRequest,
                                                 EmbeddingsResponse,
                                                 ErrorResponse,
                                                 KAIGenerationInputSchema,
                                                 Prompt)
from aphrodite.endpoints.openai.serving_chat import OpenAIServingChat
from aphrodite.endpoints.openai.serving_completions import (
    OpenAIServingCompletion)
from aphrodite.endpoints.openai.serving_engine import LoRA
from aphrodite.engine.args_tools import AsyncEngineArgs
from aphrodite.engine.async_aphrodite import AsyncAphrodite
from aphrodite.transformers_utils.tokenizer import get_tokenizer

TIMEOUT_KEEP_ALIVE = 5  # seconds

openai_serving_chat: OpenAIServingChat = None
openai_serving_completion: OpenAIServingCompletion = None
kai_api = APIRouter()
extra_api = APIRouter()
kobold_lite_ui = ""
sampler_json = ""
gen_cache: dict = {}
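# gen_cache maps a generation's genkey to the text produced so far; the
# /api/extra/generate/check route reads it to report partial results.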


@asynccontextmanager
async def lifespan(app: fastapi.FastAPI):

    async def _force_log():
        while True:
            await asyncio.sleep(10)
            await engine.do_log_stats()

    if not engine_args.disable_log_stats:
        asyncio.create_task(_force_log())

    yield


app = fastapi.FastAPI(title="Aphrodite Engine",
                      summary="Serving language models at scale",
                      description=("A RESTful API server compatible with "
                                   "OpenAI and KoboldAI clients. "),
                      lifespan=lifespan)


class LoRAParserAction(argparse.Action):

    def __call__(self, parser, namespace, values, option_string=None):
        lora_list = []
        for item in values:
            name, path = item.split('=')
            lora_list.append(LoRA(name, path))
        setattr(namespace, self.dest, lora_list)
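

# Example (hypothetical path): `--lora-modules alpaca=/loras/alpaca`
# registers the adapter stored at /loras/alpaca under the served name
# "alpaca".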


def parse_args():
    parser = argparse.ArgumentParser(
        description="Aphrodite OpenAI-Compatible RESTful API server.")
    parser.add_argument("--host", type=str, default=None, help="host name")
    parser.add_argument("--port", type=int, default=2242, help="port number")
    parser.add_argument("--allow-credentials",
                        action="store_true",
                        help="allow credentials")
    parser.add_argument("--allowed-origins",
                        type=json.loads,
                        default=["*"],
                        help="allowed origins")
    parser.add_argument("--allowed-methods",
                        type=json.loads,
                        default=["*"],
                        help="allowed methods")
    parser.add_argument("--allowed-headers",
                        type=json.loads,
                        default=["*"],
                        help="allowed headers")
    parser.add_argument(
        "--api-keys",
        type=str,
        default=None,
        help="If provided, the server will require this key to be presented "
        "in the header.")
    parser.add_argument(
        "--admin-key",
        type=str,
        default=None,
        help="If provided, the server will require this key to be presented "
        "in the header for admin operations.")
    parser.add_argument(
        "--launch-kobold-api",
        action="store_true",
        help="Launch the Kobold API server in addition to the OpenAI API "
        "server.")
    parser.add_argument("--max-length",
                        type=int,
                        default=256,
                        help="The maximum length of the generated response. "
                        "For use with Kobold Horde.")
    parser.add_argument("--served-model-name",
                        type=str,
                        default=None,
                        help="The model name used in the API. If not "
                        "specified, the model name will be the same as "
                        "the huggingface name.")
    parser.add_argument(
        "--lora-modules",
        type=str,
        default=None,
        nargs='+',
        action=LoRAParserAction,
        help="LoRA module configurations in the format name=path. "
        "Multiple modules can be specified.")
    parser.add_argument("--chat-template",
                        type=str,
                        default=None,
                        help="The file path to the chat template, "
                        "or the template in single-line form "
                        "for the specified model")
    parser.add_argument("--response-role",
                        type=str,
                        default="assistant",
                        help="The role name to return if "
                        "`request.add_generation_prompt=true`.")
    parser.add_argument("--ssl-keyfile",
                        type=str,
                        default=None,
                        help="The file path to the SSL key file")
    parser.add_argument("--ssl-certfile",
                        type=str,
                        default=None,
                        help="The file path to the SSL cert file")
    parser.add_argument(
        "--root-path",
        type=str,
        default=None,
        help="FastAPI root_path when app is behind a path based routing "
        "proxy")
    parser.add_argument(
        "--middleware",
        type=str,
        action="append",
        default=[],
        help="Additional ASGI middleware to apply to the app. "
        "We accept multiple --middleware arguments. "
        "The value should be an import path. "
        "If a function is provided, Aphrodite will add it to the server "
        "using @app.middleware('http'). "
        "If a class is provided, Aphrodite will add it to the server using "
        "app.add_middleware(). ")

    parser = AsyncEngineArgs.add_cli_args(parser)
    return parser.parse_args()
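

# Example (hypothetical module path): `--middleware my_pkg.auth.AuthMiddleware`
# resolves to a class and is registered via app.add_middleware(); a coroutine
# at that path would instead be wrapped with @app.middleware("http").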


# Add prometheus asgi middleware to route /metrics requests
metrics_app = make_asgi_app()
app.mount("/metrics/", metrics_app)


@app.exception_handler(RequestValidationError)
async def validation_exception_handler(_, exc):
    err = openai_serving_chat.create_error_response(message=str(exc))
    return JSONResponse(err.model_dump(), status_code=HTTPStatus.BAD_REQUEST)


@app.get("/health")
async def health() -> Response:
    """Health check."""
    await openai_serving_chat.engine.check_health()
    return Response(status_code=200)


@app.get("/v1/models")
async def show_available_models(x_api_key: Optional[str] = Header(None)):
    models = await openai_serving_chat.show_available_models()
    return JSONResponse(content=models.model_dump())


@app.post("/v1/tokenize")
@app.post("/v1/token/encode")
async def tokenize(request: Request,
                   prompt: Prompt,
                   x_api_key: Optional[str] = Header(None)):
    tokenized = await openai_serving_chat.tokenize(prompt)
    return JSONResponse(content=tokenized)


@app.post("/v1/detokenize")
@app.post("/v1/token/decode")
async def detokenize(request: Request,
                     token_ids: List[int],
                     x_api_key: Optional[str] = Header(None)):
    detokenized = await openai_serving_chat.detokenize(token_ids)
    return JSONResponse(content=detokenized)
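

# Sketch (assuming the Prompt schema carries a `prompt` field): POST
# /v1/token/encode with {"prompt": "Hello"} returns the prompt's token ids,
# and POST /v1/token/decode with those ids returns the original text.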


@app.post("/v1/embeddings", response_model=EmbeddingsResponse)
async def handle_embeddings(request: EmbeddingsRequest,
                            x_api_key: Optional[str] = Header(None)):
    input_text = request.input
    if not input_text:
        return JSONResponse(
            status_code=400,
            content={"error": "Missing required argument input"})

    model = request.model if request.model else None
    response = await OAIembeddings.embeddings(input_text,
                                              request.encoding_format, model)
    return JSONResponse(response)
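

# A minimal request sketch (fields per EmbeddingsRequest; "float" mirrors
# the OpenAI encoding_format convention):
#   POST /v1/embeddings  {"input": "Hello world", "encoding_format": "float"}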


@app.get("/version", description="Fetch the Aphrodite Engine version.")
async def show_version(x_api_key: Optional[str] = Header(None)):
    ver = {"version": aphrodite.__version__}
    return JSONResponse(content=ver)


@app.get("/v1/samplers")
async def show_samplers(x_api_key: Optional[str] = Header(None)):
    """Get the available samplers."""
    global sampler_json
    if not sampler_json:
        jsonpath = os.path.dirname(os.path.abspath(__file__))
        samplerpath = os.path.join(jsonpath, "./samplers.json")
        samplerpath = os.path.normpath(samplerpath)  # Normalize the path
        if os.path.exists(samplerpath):
            with open(samplerpath, "r") as f:
                sampler_json = json.load(f)
        else:
            logger.error("Sampler JSON not found at " + samplerpath)
    return sampler_json


@app.post("/v1/lora/load")
async def load_lora(lora: LoRA, x_api_key: Optional[str] = Header(None)):
    if engine_args.enable_lora is False:
        logger.error("LoRA is not enabled in the engine. "
                     "Please start the server with the "
                     "--enable-lora flag.")
        return JSONResponse(content={"error": "LoRA is not enabled"},
                            status_code=400)
    openai_serving_chat.add_lora(lora)
    openai_serving_completion.add_lora(lora)
    return JSONResponse(content={"result": "success"})


@app.delete("/v1/lora/unload")
async def unload_lora(lora_name: str,
                      x_api_key: Optional[str] = Header(None)):
    openai_serving_chat.remove_lora(lora_name)
    openai_serving_completion.remove_lora(lora_name)
    return JSONResponse(content={"result": "success"})
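

# Note: when API keys are enforced, the /v1/lora endpoints accept only the
# admin key (see the authentication middleware in the __main__ block).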


@app.post("/v1/chat/completions")
async def create_chat_completion(request: ChatCompletionRequest,
                                 raw_request: Request,
                                 x_api_key: Optional[str] = Header(None)):
    generator = await openai_serving_chat.create_chat_completion(
        request, raw_request)
    if isinstance(generator, ErrorResponse):
        return JSONResponse(content=generator.model_dump(),
                            status_code=generator.code)
    if request.stream:
        return StreamingResponse(content=generator,
                                 media_type="text/event-stream")
    else:
        return JSONResponse(content=generator.model_dump())


@app.post("/v1/completions")
async def create_completion(request: CompletionRequest,
                            raw_request: Request,
                            x_api_key: Optional[str] = Header(None)):
    generator = await openai_serving_completion.create_completion(
        request, raw_request)
    if isinstance(generator, ErrorResponse):
        return JSONResponse(content=generator.model_dump(),
                            status_code=generator.code)
    if request.stream:
        return StreamingResponse(content=generator,
                                 media_type="text/event-stream")
    else:
        return JSONResponse(content=generator.model_dump())


# ============ KoboldAI API ============ #


def _set_badwords(tokenizer, hf_config):  # pylint: disable=redefined-outer-name
    # pylint: disable=global-variable-undefined
    global badwordsids
    if hf_config.bad_words_ids is not None:
        badwordsids = hf_config.bad_words_ids
        return

    badwordsids = [
        v for k, v in tokenizer.get_vocab().items()
        if any(c in str(k) for c in "[]")
    ]
    if tokenizer.pad_token_id in badwordsids:
        badwordsids.remove(tokenizer.pad_token_id)
    badwordsids.append(tokenizer.eos_token_id)
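

# With use_default_badwordsids set, requests ban every vocab token whose
# string contains a square bracket, plus the EOS token (the pad token is
# exempted), following KoboldAI's default-badwords convention.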


def prepare_engine_payload(
        kai_payload: KAIGenerationInputSchema
) -> Tuple[SamplingParams, List[int]]:
    """Create SamplingParams and truncated input tokens for AsyncEngine"""
    if not kai_payload.genkey:
        kai_payload.genkey = f"kai-{random_uuid()}"

    # if kai_payload.max_context_length > engine_args.max_model_len:
    #     raise ValueError(
    #         f"max_context_length ({kai_payload.max_context_length}) "
    #         "must be less than or equal to "
    #         f"max_model_len ({engine_args.max_model_len})")

    kai_payload.top_k = kai_payload.top_k if kai_payload.top_k != 0.0 else -1
    kai_payload.tfs = max(_SAMPLING_EPS, kai_payload.tfs)
    if kai_payload.temperature < _SAMPLING_EPS:
        # Effectively greedy sampling; neutralize the other samplers.
        kai_payload.n = 1
        kai_payload.top_p = 1.0
        kai_payload.top_k = -1

    # Dynamic temperature is only active for a positive dynatemp_range;
    # guard against None before comparing.
    dynatemp_min = 0.0
    dynatemp_max = 0.0
    if (kai_payload.dynatemp_range is not None
            and kai_payload.dynatemp_range > 0):
        dynatemp_min = kai_payload.temperature - kai_payload.dynatemp_range
        dynatemp_max = kai_payload.temperature + kai_payload.dynatemp_range

    sampling_params = SamplingParams(
        n=kai_payload.n,
        best_of=kai_payload.n,
        repetition_penalty=kai_payload.rep_pen,
        temperature=kai_payload.temperature,
        dynatemp_min=dynatemp_min,
        dynatemp_max=dynatemp_max,
        dynatemp_exponent=kai_payload.dynatemp_exponent,
        smoothing_factor=kai_payload.smoothing_factor,
        smoothing_curve=kai_payload.smoothing_curve,
        tfs=kai_payload.tfs,
        top_p=kai_payload.top_p,
        top_k=kai_payload.top_k,
        top_a=kai_payload.top_a,
        min_p=kai_payload.min_p,
        typical_p=kai_payload.typical,
        eta_cutoff=kai_payload.eta_cutoff,
        epsilon_cutoff=kai_payload.eps_cutoff,
        mirostat_mode=kai_payload.mirostat,
        mirostat_tau=kai_payload.mirostat_tau,
        mirostat_eta=kai_payload.mirostat_eta,
        stop=kai_payload.stop_sequence,
        include_stop_str_in_output=kai_payload.include_stop_str_in_output,
        custom_token_bans=badwordsids
        if kai_payload.use_default_badwordsids else [],
        max_tokens=kai_payload.max_length,
        seed=kai_payload.sampler_seed,
    )

    # Truncate the prompt so that prompt + response fit in the context.
    max_input_tokens = max(
        1, kai_payload.max_context_length - kai_payload.max_length)
    input_tokens = tokenizer(kai_payload.prompt).input_ids[-max_input_tokens:]

    return sampling_params, input_tokens
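

# e.g. with max_context_length=4096 and max_length=256, the prompt keeps at
# most its last 3840 tokens, leaving 256 tokens of room for the response.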


@kai_api.post("/generate")
async def generate(kai_payload: KAIGenerationInputSchema) -> JSONResponse:
    sampling_params, input_tokens = prepare_engine_payload(kai_payload)
    result_generator = engine.generate(None, sampling_params,
                                       kai_payload.genkey, input_tokens)

    final_res: Optional[RequestOutput] = None
    previous_output = ""
    async for res in result_generator:
        final_res = res
        new_chunk = res.outputs[0].text[len(previous_output):]
        previous_output += new_chunk
        gen_cache[kai_payload.genkey] = previous_output

    assert final_res is not None
    del gen_cache[kai_payload.genkey]
    return JSONResponse(
        {"results": [{
            "text": output.text
        } for output in final_res.outputs]})


@extra_api.post("/generate/stream")
async def generate_stream(
        kai_payload: KAIGenerationInputSchema) -> StreamingResponse:
    sampling_params, input_tokens = prepare_engine_payload(kai_payload)
    results_generator = engine.generate(None, sampling_params,
                                        kai_payload.genkey, input_tokens)

    async def stream_kobold() -> AsyncGenerator[bytes, None]:
        previous_output = ""
        async for res in results_generator:
            new_chunk = res.outputs[0].text[len(previous_output):]
            previous_output += new_chunk
            yield b"event: message\n"
            yield f"data: {json.dumps({'token': new_chunk})}\n\n".encode()

    return StreamingResponse(stream_kobold(),
                             headers={
                                 "Cache-Control": "no-cache",
                                 "Connection": "keep-alive",
                             },
                             media_type="text/event-stream")
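

# Each streamed event is framed as Server-Sent Events, i.e.
#   event: message
#   data: {"token": "<newly generated text>"}
# followed by a blank line, as produced by stream_kobold() above.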


@extra_api.post("/generate/check")
@extra_api.get("/generate/check")
async def check_generation(request: Request):
    text = ""
    try:
        request_dict = await request.json()
        if "genkey" in request_dict and request_dict["genkey"] in gen_cache:
            text = gen_cache[request_dict["genkey"]]
    except json.JSONDecodeError:
        pass

    return JSONResponse({"results": [{"text": text}]})


@extra_api.post("/abort")
async def abort_generation(request: Request):
    try:
        request_dict = await request.json()
        if "genkey" in request_dict:
            await engine.abort(request_dict["genkey"])
    except json.JSONDecodeError:
        pass

    return JSONResponse({})


@extra_api.post("/tokencount")
async def count_tokens(request: Request):
    """Tokenize string and return token count"""
    request_dict = await request.json()
    tokenizer_result = await openai_serving_chat.tokenize(
        request_dict["prompt"])
    return JSONResponse({"value": len(tokenizer_result)})


@kai_api.get("/info/version")
async def get_version():
    """Impersonate KAI"""
    return JSONResponse({"result": "1.2.4"})


@kai_api.get("/model")
async def get_model():
    return JSONResponse({"result": f"aphrodite/{served_model}"})


@kai_api.get("/config/soft_prompts_list")
async def get_available_softprompts():
    """Stub for compatibility"""
    return JSONResponse({"values": []})


@kai_api.get("/config/soft_prompt")
async def get_current_softprompt():
    """Stub for compatibility"""
    return JSONResponse({"value": ""})


@kai_api.put("/config/soft_prompt")
async def set_current_softprompt():
    """Stub for compatibility"""
    return JSONResponse({})


@kai_api.get("/config/max_length")
async def get_max_length() -> JSONResponse:
    max_length = args.max_length
    return JSONResponse({"value": max_length})


@kai_api.get("/config/max_context_length")
@extra_api.get("/true_max_context_length")
async def get_max_context_length() -> JSONResponse:
    max_context_length = engine_args.max_model_len
    return JSONResponse({"value": max_context_length})


@extra_api.get("/preloadstory")
async def get_preloaded_story() -> JSONResponse:
    """Stub for compatibility"""
    return JSONResponse({})


@extra_api.get("/version")
async def get_extra_version():
    """Impersonate KoboldCpp"""
    return JSONResponse({"result": "KoboldCpp", "version": "1.60.1"})


@app.get("/")
async def get_kobold_lite_ui():
    """Serves a cached copy of the Kobold Lite UI, loading it from disk
    on demand if needed."""
    global kobold_lite_ui
    if kobold_lite_ui == "":
        scriptpath = os.path.dirname(os.path.abspath(__file__))
        klitepath = os.path.join(scriptpath, "../kobold/klite.embd")
        klitepath = os.path.normpath(klitepath)  # Normalize the path
        if os.path.exists(klitepath):
            with open(klitepath, "r") as f:
                kobold_lite_ui = f.read()
        else:
            logger.error("Kobold Lite UI not found at " + klitepath)
    return HTMLResponse(content=kobold_lite_ui)


# ============ KoboldAI API ============ #

if __name__ == "__main__":
    try:
        args = parse_args()

        if args.launch_kobold_api:
            logger.warning(
                "Launching Kobold API server in addition to OpenAI. "
                "Keep in mind that the Kobold API routes are NOT "
                "protected via the API key.")
            app.include_router(kai_api, prefix="/api/v1")
            app.include_router(kai_api,
                               prefix="/api/latest",
                               include_in_schema=False)
            app.include_router(extra_api, prefix="/api/extra")

        app.add_middleware(
            CORSMiddleware,
            allow_origins=args.allowed_origins,
            allow_credentials=args.allow_credentials,
            allow_methods=args.allowed_methods,
            allow_headers=args.allowed_headers,
        )

        if token := os.environ.get("APHRODITE_API_KEY") or args.api_keys:
            admin_key = os.environ.get("APHRODITE_ADMIN_KEY") or args.admin_key
            if admin_key is None:
                logger.warning("Admin key not provided. Admin operations "
                               "will be disabled.")

            @app.middleware("http")
            async def authentication(request: Request, call_next):
                excluded_paths = ["/api"]
                if any(
                        request.url.path.startswith(path)
                        for path in excluded_paths):
                    return await call_next(request)
                if not request.url.path.startswith("/v1"):
                    return await call_next(request)

                auth_header = request.headers.get("Authorization")
                api_key_header = request.headers.get("x-api-key")
                if request.url.path.startswith("/v1/lora"):
                    if admin_key is not None and api_key_header == admin_key:
                        return await call_next(request)
                    return JSONResponse(content={"error": "Unauthorized"},
                                        status_code=401)

                if (auth_header != "Bearer " + token
                        and api_key_header != token):
                    return JSONResponse(content={"error": "Unauthorized"},
                                        status_code=401)
                return await call_next(request)
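
        # Clients authenticate with either "Authorization: Bearer <key>" or
        # "x-api-key: <key>"; Kobold routes under /api and other non-/v1
        # paths bypass the check, per the exclusions above.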

        for middleware in args.middleware:
            module_path, object_name = middleware.rsplit(".", 1)
            imported = getattr(importlib.import_module(module_path),
                               object_name)
            if inspect.isclass(imported):
                app.add_middleware(imported)
            elif inspect.iscoroutinefunction(imported):
                app.middleware("http")(imported)
            else:
                raise ValueError(f"Invalid middleware {middleware}. Must be "
                                 "a function or a class.")

        logger.debug(f"args: {args}")

        if args.served_model_name is not None:
            served_model = args.served_model_name
        else:
            served_model = args.model

        engine_args = AsyncEngineArgs.from_cli_args(args)
        engine = AsyncAphrodite.from_engine_args(engine_args)

        tokenizer = get_tokenizer(
            engine_args.tokenizer,
            tokenizer_mode=engine_args.tokenizer_mode,
            trust_remote_code=engine_args.trust_remote_code,
        )

        chat_template = args.chat_template
        if chat_template is None and tokenizer.chat_template is not None:
            chat_template = tokenizer.chat_template
        openai_serving_chat = OpenAIServingChat(engine, served_model,
                                                args.response_role,
                                                args.lora_modules,
                                                chat_template)
        openai_serving_completion = OpenAIServingCompletion(
            engine, served_model, args.lora_modules)
        engine_model_config = asyncio.run(engine.get_model_config())

        if args.launch_kobold_api:
            _set_badwords(tokenizer, engine_model_config.hf_config)

        app.root_path = args.root_path
        uvicorn.run(app,
                    host=args.host,
                    port=args.port,
                    log_level="info",
                    timeout_keep_alive=TIMEOUT_KEEP_ALIVE,
                    ssl_keyfile=args.ssl_keyfile,
                    ssl_certfile=args.ssl_certfile,
                    log_config=UVICORN_LOG_CONFIG)
    except KeyboardInterrupt:
        logger.info("API server stopped by user. Exiting gracefully.")
    except asyncio.exceptions.CancelledError:
        logger.info("API server stopped due to a cancelled request. "
                    "Exiting gracefully.")