# openai_model.py
import base64
import json
from io import BytesIO

from openai import OpenAI
from PIL import Image
  6. class OpenAIModel:
  7. def __init__(self, name):
  8. config = json.load(open("config.json"))
  9. api_key = config['llms']['openai']['api_key'].strip()
  10. api_base = config['llms']['openai']['api_base'].strip()
  11. self.client = OpenAI(api_key=api_key, base_url=api_base)
  12. self.name = name
  13. self.hparams = config['hparams']
  14. self.hparams.update(config['llms']['openai'].get('hparams') or {})
  15. def make_request(self, conversation, add_image=None, max_tokens=None, json=False):
  16. conversation = [{"role": "user" if i%2 == 0 else "assistant", "content": content} for i,content in enumerate(conversation)]
  17. if add_image:
  18. buffered = BytesIO()
  19. add_image.convert("RGB").save(buffered, format="JPEG")
  20. img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
  21. img_str = f"data:image/jpeg;base64,{img_str}"
  22. conversation[0]['content'] = [{"type": "text", "text": conversation[0]['content']},
  23. {
  24. "type": "image_url",
  25. "image_url": {
  26. "url": img_str
  27. }
  28. }
  29. ]
  30. kwargs = {
  31. "messages": conversation,
  32. "max_tokens": max_tokens,
  33. }
  34. kwargs.update(self.hparams)
  35. for k,v in list(kwargs.items()):
  36. if v is None:
  37. del kwargs[k]
  38. if json:
  39. kwargs['response_format'] = { "type": "json_object" }
  40. if self.name.startswith("o1"):
  41. del kwargs['temperature']
  42. out = self.client.chat.completions.create(
  43. model=self.name,
  44. **kwargs
  45. )
  46. return out.choices[0].message.content
  47. if __name__ == "__main__":
  48. import sys
  49. #q = sys.stdin.read().strip()
  50. q = "hello there"
  51. print(q+":", OpenAIModel("o1-mini").make_request([q]))