cli.py

# The CLI entrypoint to Aphrodite.
import argparse
import os
import signal
import subprocess
import sys
from typing import Optional

import uvloop
import yaml
from openai import OpenAI

from aphrodite.common.utils import FlexibleArgumentParser
from aphrodite.endpoints.openai.api_server import run_server
from aphrodite.endpoints.openai.args import make_arg_parser
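
# The subcommands wired up in main() below, shown with their usage strings:
#
#   aphrodite run <model_tag> [options]   # start the OpenAI-compatible API server
#   aphrodite complete [options]          # interactive completions against a running server
#   aphrodite chat [options]              # interactive chat against a running server
#   aphrodite yaml <config.yaml>          # start the server from a YAML config file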


def register_signal_handlers():

    def signal_handler(sig, frame):
        sys.exit(0)

    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTSTP, signal_handler)


def serve(args: argparse.Namespace) -> None:
    # EngineArgs expects the model name to be passed as --model.
    args.model = args.model_tag
    uvloop.run(run_server(args))


def interactive_cli(args: argparse.Namespace) -> None:
    register_signal_handlers()

    base_url = args.url
    api_key = args.api_key or os.environ.get("OPENAI_API_KEY", "EMPTY")
    openai_client = OpenAI(api_key=api_key, base_url=base_url)

    if args.model_name:
        model_name = args.model_name
    else:
        available_models = openai_client.models.list()
        model_name = available_models.data[0].id

    print(f"Using model: {model_name}")

    if args.command == "complete":
        complete(model_name, openai_client)
    elif args.command == "chat":
        chat(args.system_prompt, model_name, openai_client)


def complete(model_name: str, client: OpenAI) -> None:
    print("Please enter prompt to complete:")
    while True:
        input_prompt = input("> ")
        completion = client.completions.create(model=model_name,
                                               prompt=input_prompt)
        output = completion.choices[0].text
        print(output)


def chat(system_prompt: Optional[str], model_name: str,
         client: OpenAI) -> None:
    conversation = []
    if system_prompt is not None:
        conversation.append({"role": "system", "content": system_prompt})

    print("Please enter a message for the chat model:")
    while True:
        input_message = input("> ")
        message = {"role": "user", "content": input_message}
        conversation.append(message)

        chat_completion = client.chat.completions.create(model=model_name,
                                                         messages=conversation)

        response_message = chat_completion.choices[0].message
        output = response_message.content

        # Keep the assistant's reply in the history so later turns have context.
        conversation.append(response_message)
        print(output)


# Boolean options that are passed as an explicit "true"/"false" value
# rather than as a bare flag.
STR_BOOLS = ['enforce_eager', 'enable_chunked_prefill']
# Options whose list-of-mappings values are flattened into "name=value"
# pairs on the command line.
ADAPTERS = ['lora_modules', 'prompt_adapters']


# TODO: refactor this to directly call run_server with the config file
def serve_yaml(args: argparse.Namespace) -> None:

    def append_cmd_args(cmd, key, value):
        if value:  # Skip appending if value is empty
            if key in ADAPTERS and isinstance(value, list):
                adapters = [f"{k}={v}" for k, v in value[0].items() if v]
                if adapters:
                    cmd.append(f"--{key}")
                    cmd.extend(adapters)
            else:
                cmd.append(f"--{key}")
                if isinstance(value, bool):
                    if key in STR_BOOLS:
                        cmd.append(str(value).lower())
                    elif value:
                        cmd.append(f"--{key}")
                else:
                    cmd.append(str(value))

    with open(args.config_file, 'r') as f:
        config = yaml.safe_load(f)

    cmd = ["python3", "-m", "aphrodite.endpoints.openai.api_server"]
    for key, value in config.items():
        if isinstance(value, list):
            for item in value:
                for sub_key, sub_value in item.items():
                    append_cmd_args(cmd, sub_key, sub_value)
        else:
            append_cmd_args(cmd, key, value)

    process = subprocess.Popen(cmd)
    try:
        process.wait()
    except KeyboardInterrupt:
        process.terminate()
        process.wait()
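
# A rough sketch of the kind of config file the `yaml` subcommand expects.
# Keys mirror the server's command-line flags; the model and values below are
# illustrative placeholders, not shipped defaults:
#
#   model: mistralai/Mistral-7B-Instruct-v0.2     # -> --model mistralai/Mistral-7B-Instruct-v0.2
#   port: 2242                                    # -> --port 2242
#   enforce_eager: true                           # STR_BOOLS: -> --enforce_eager true
#   adapters:                                     # section name is arbitrary and never emitted
#     - lora_modules:                             # ADAPTERS: -> --lora_modules my_lora=/path/to/lora/adapter
#         - my_lora: /path/to/lora/adapter
#
# A top-level key with a list-of-mappings value is treated as a grouping
# section: only its nested keys are flattened into flags by the loop above.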


def _add_query_options(
        parser: FlexibleArgumentParser) -> FlexibleArgumentParser:
    parser.add_argument(
        "--url",
        type=str,
        default="http://localhost:2242/v1",
        help="URL of the running OpenAI-compatible RESTful API server")
    parser.add_argument(
        "--model-name",
        type=str,
        default=None,
        help=("The model name used for completions; defaults to "
              "the first model returned by the list models API call."))
    parser.add_argument(
        "--api-key",
        type=str,
        default=None,
        help=(
            "API key for OpenAI services. If provided, this API key "
            "overrides the API key obtained through the environment variables."
        ))
    return parser


def main():
    parser = FlexibleArgumentParser(description="Aphrodite CLI")
    subparsers = parser.add_subparsers(required=True)

    serve_parser = subparsers.add_parser(
        "run",
        help="Start the Aphrodite OpenAI-compatible API server",
        usage="aphrodite run <model_tag> [options]")
    serve_parser.add_argument("model_tag",
                              type=str,
                              help="The model tag to serve")
    serve_parser = make_arg_parser(serve_parser)
    serve_parser.set_defaults(dispatch_function=serve)

    complete_parser = subparsers.add_parser(
        "complete",
        help=("Generate text completions based on the given prompt "
              "via the running API server"),
        usage="aphrodite complete [options]")
    _add_query_options(complete_parser)
    complete_parser.set_defaults(dispatch_function=interactive_cli,
                                 command="complete")

    chat_parser = subparsers.add_parser(
        "chat",
        help="Generate chat completions via the running API server",
        usage="aphrodite chat [options]")
    _add_query_options(chat_parser)
    chat_parser.add_argument(
        "--system-prompt",
        type=str,
        default=None,
        help=("The system prompt to be added to the chat template, "
              "used for models that support system prompts."))
    chat_parser.set_defaults(dispatch_function=interactive_cli, command="chat")

    yaml_parser = subparsers.add_parser(
        "yaml",
        help="Start the Aphrodite OpenAI-compatible API server with a YAML "
        "config file",
        usage="aphrodite yaml <config.yaml>")
    yaml_parser.add_argument("config_file",
                             type=str,
                             help="The YAML configuration file to use")
    yaml_parser.set_defaults(dispatch_function=serve_yaml)

    args = parser.parse_args()
    # One of the subcommands should have been matched.
    if hasattr(args, "dispatch_function"):
        args.dispatch_function(args)
    else:
        parser.print_help()


if __name__ == "__main__":
    main()