chatapi.py 2.1 KB

import os

import openai  # only needed if the legacy Completion examples below are re-enabled
from openai import OpenAI

# Load your API key from an environment variable or a secret management service.
# OpenAI() reads OPENAI_API_KEY from the environment by default; to pass it
# explicitly instead:
# api_key: str = os.environ.get("OPENAI_API_KEY")
# client = OpenAI(api_key=api_key)
client = OpenAI()


def get_ai_response(prompt: str = "Introduce Taipei 101", temperature: float = 0.7, max_tokens: int = 1000):
    """Send one chat completion request and return a dict with "state" and "message"."""
    try:
        response = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {
                    "role": "system",
                    "content": "We are the Taipei 101 customer service team, responsible for answering visitors' questions.",
                },
                {
                    "role": "user",
                    "content": prompt,
                },
            ],
            temperature=temperature,
            max_tokens=max_tokens,
            top_p=1,
        )
        return {"state": "success", "message": response.choices[0].message.content}
    except Exception as e:
        return {"state": "fail", "message": str(e)}


if __name__ == "__main__":
    print(get_ai_response(prompt="What is on the 75th floor of Taipei 101?"))
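
    # A hypothetical follow-up call (a sketch, not in the original file): the
    # function returns {"state": ..., "message": ...}, so callers can branch on it.
    result = get_ai_response(prompt="What are the observatory's opening hours?")
    if result["state"] == "success":
        print(result["message"])
    else:
        print("Request failed:", result["message"])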

# --- Legacy experiments using the pre-1.0 openai.Completion API (kept for reference) ---
# prompt = "陳奕凱 sent me a speechless emoji; what kind of person do you think they are?"
# completions = openai.Completion.create(
#     engine="text-davinci-003",
#     prompt=prompt,
#     max_tokens=500,
#     n=1,
#     presence_penalty=0.5,
#     frequency_penalty=0.5,
# )
# message = completions.choices[0].text
# print(message)

# prompt = "Introduce the first company in Taiwan to build an AI news anchor"
# # response = openai.Completion.create(model="text-davinci-003", prompt=talk, temperature=0, max_tokens=7)
# completions_params = {
#     "prompt": prompt,
#     "max_tokens": 60,
#     "temperature": 0.5,
#     "n": 1,
#     "stop": "\n",
# }
# response = openai.Completion.create(
#     model="text-davinci-003",  # davinci:ft-choozmo-inc-2023-02-16-10-25-30
#     prompt=prompt,
#     temperature=0,
#     max_tokens=500,
#     top_p=1,
#     frequency_penalty=0.0,
#     presence_penalty=0.0,
#     # stop=["\n"]
# )
# print(response.choices[0].text)
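
# A minimal sketch (not in the original file) of how the legacy Completion calls
# above map onto the 1.x client used at the top of this file. It assumes a
# completions-style model such as gpt-3.5-turbo-instruct is available; the
# parameter names match the legacy calls.
# response = client.completions.create(
#     model="gpt-3.5-turbo-instruct",
#     prompt="Introduce the first company in Taiwan to build an AI news anchor",
#     temperature=0,
#     max_tokens=500,
#     top_p=1,
# )
# print(response.choices[0].text)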