# Simple CLI helper that sends a code question to gpt-3.5-turbo and prints the reply.
import openai
import re
import textwrap
import threading
import json
import os

# Read the API key from the OPENAI_API_KEY environment variable instead of
# hard-coding it: a key committed to source control is leaked and must be
# revoked. (The openai library also falls back to this variable on its own
# when api_key is left unset.)
openai.api_key = os.environ.get("OPENAI_API_KEY")

def comment_callback(address, view, response):
    """Print the model's reply, word-wrapped to 80 columns.

    :param address: Unused; kept for callback-signature compatibility.
    :param view: Unused; kept for callback-signature compatibility.
    :param response: Text returned by the model.
    """
    wrapped_lines = textwrap.wrap(response, 80, replace_whitespace=False)
    print("\n".join(wrapped_lines))
    print("gpt-3.5-turbo query finished!")

14
def query_model(query, cb, max_tokens=2500):
    """
    Send a query to gpt-3.5-turbo and pass the response to a callback.

    Blocks until the response is received.

    :param query: The request to send to gpt-3.5-turbo.
    :param cb: Callback invoked as cb(address, view, response_text); the
        first two arguments are None here (see comment_callback).
    :param max_tokens: Upper bound on completion tokens; automatically
        reduced and the query retried when the context limit is exceeded.
    """
    try:
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "user", "content": query}
            ],
            # Bug fix: max_tokens was accepted and recomputed on retry but
            # never forwarded to the API, making the retry logic a no-op.
            max_tokens=max_tokens,
        )

        # Bug fix: the callback was documented but never invoked; the
        # response was only printed. Deliver it to the caller's callback
        # (address/view are unused by the callbacks in this script).
        cb(None, None, str(response["choices"][0]["message"]["content"]))

    except openai.InvalidRequestError as e:
        # Context length exceeded. Determine the max number of tokens we can ask for and retry.
        m = re.search(r'maximum context length is (\d+) tokens, however you requested \d+ tokens \((\d+) in your '
                      r'prompt;', str(e))
        if not m:
            print("gpt-3.5-turbo could not complete the request: {error}".format(error=str(e)))
            return
        (hard_limit, prompt_tokens) = (int(m.group(1)), int(m.group(2)))
        max_tokens = hard_limit - prompt_tokens
        if max_tokens >= 750:
            # Bug fix: this line previously called the undefined name _()
            # (gettext-style), which raised NameError on the retry path.
            print("Context length exceeded! Reducing the completion tokens to "
                  "{max_tokens}...".format(max_tokens=max_tokens))
            query_model(query, cb, max_tokens)
        else:
            print("Unfortunately, this function is too big to be analyzed with the model's current API limits.")

    except openai.OpenAIError as e:
        print("gpt-3.5-turbo could not complete the request: {error}".format(error=str(e)))
    except Exception as e:
        print("General exception encountered while running the query: {error}".format(error=str(e)))

56
57# -----------------------------------------------------------------------------
58
def query_model_async(query, cb):
    """Fire query_model on a background thread so the caller is not blocked.

    :param query: The request to send to gpt-3.5-turbo.
    :param cb: Callback forwarded unchanged to query_model.
    """
    worker = threading.Thread(target=query_model, args=(query, cb))
    worker.start()

62
# Demo entry point: ask the model how to retry a task that failed inside a
# ThreadPoolExecutor. The request runs on a background thread and the reply
# is printed by comment_callback. Note the triple-quoted query text below is
# sent verbatim to the model.

if __name__ == '__main__':
    query_model_async('''
    I have code :

    with ThreadPoolExecutor(max_workers=threads) as self.__pool:
        for anydesk_id in self.__ids:
            future = self.__pool.submit(self.__check_id, anydesk_id)
            results.append(future)

        for f in as_completed(results):
            anydesk_id, status = f.result()
            if status:
                with open(self._OUTPUT_FILES[status], "a+") as f:
                    f.write( str(anydesk_id))

    how to recall __check_id function if it failed?

    ''', cb=comment_callback)
83