Hashintha commited on
Commit
81afba2
·
1 Parent(s): f9172dd

Upload 7 files

Browse files
Files changed (7) hide show
  1. app.py +287 -0
  2. bot_backend.py +232 -0
  3. config.json +17 -0
  4. functional.py +116 -0
  5. jupyter_backend.py +100 -0
  6. requirements.txt +3 -0
  7. response_parser.py +200 -0
app.py ADDED
@@ -0,0 +1,287 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from response_parser import *
2
+ import gradio as gr
3
+ import os
4
+
5
+
6
+ username = os.getenv('USERNAME')
7
+ password = os.getenv('PASSWORD')
8
+
9
def initialization(state_dict: Dict) -> None:
    """Lazily create the per-session BotBackend and the shared cache dir.

    Bound to ``block.load`` so it runs once per browser session.  The
    OPENAI_API_KEY env var is removed after the backend is built so the key
    cannot leak into code later executed inside the Jupyter kernel.
    """
    # makedirs(exist_ok=True) is race-free, unlike exists()+mkdir().
    os.makedirs('cache', exist_ok=True)
    if state_dict["bot_backend"] is None:
        state_dict["bot_backend"] = BotBackend()
        if 'OPENAI_API_KEY' in os.environ:
            del os.environ['OPENAI_API_KEY']
16
+
17
+
18
def get_bot_backend(state_dict: Dict) -> BotBackend:
    """Return the session-scoped BotBackend kept in the gradio state dict."""
    backend = state_dict["bot_backend"]
    return backend
20
+
21
+
22
def switch_to_gpt4(state_dict: Dict, whether_switch: bool) -> None:
    """Toggle the backend between GPT-4 and GPT-3.5 from the checkbox state."""
    model_choice = "GPT-4" if whether_switch else "GPT-3.5"
    get_bot_backend(state_dict).update_gpt_model_choice(model_choice)
28
+
29
+
30
def add_text(state_dict: Dict, history: List, text: str) -> Tuple[List, Dict]:
    """Record the user's message and append it to the visible chat history.

    Returns the extended history plus a gradio update that clears and locks
    the textbox while the bot is responding.
    """
    get_bot_backend(state_dict).add_text_message(user_text=text)
    extended_history = history + [(text, None)]
    return extended_history, gr.update(value="", interactive=False)
37
+
38
+
39
def add_file(state_dict: Dict, history: List, file) -> List:
    """Register an uploaded file with the backend and show it in the chat.

    Bug fix: the chat bubble previously contained a hard-coded placeholder
    instead of the uploaded file's name (``filename`` was computed but never
    used), so the user could not see which file was attached.
    """
    bot_backend = get_bot_backend(state_dict)
    path = file.name
    filename = os.path.basename(path)

    bot_msg = [f'📁[{filename}]', None]
    history.append(bot_msg)

    # The backend keeps a reference to bot_msg so the bubble can be revoked.
    bot_backend.add_file_message(path=path, bot_msg=bot_msg)

    return history
50
+
51
+
52
def undo_upload_file(state_dict: Dict, history: List) -> Tuple[List, Dict]:
    """Revoke the most recent upload and drop its chat bubble.

    The undo button stays enabled only while more uploads remain revocable.
    """
    bot_backend = get_bot_backend(state_dict)
    revoked_msg = bot_backend.revoke_file()

    # Nothing to undo: keep history as-is, leave the button disabled.
    if revoked_msg is None:
        return history, gr.Button.update(interactive=False)

    assert history[-1] == revoked_msg
    del history[-1]
    more_revocable = bool(bot_backend.revocable_files)
    return history, gr.Button.update(interactive=more_revocable)
66
+
67
+
68
def refresh_file_display(state_dict: Dict) -> List[str]:
    """Return the path of every file currently in the session's work dir."""
    work_dir = get_bot_backend(state_dict).jupyter_work_dir
    return [os.path.join(work_dir, name) for name in os.listdir(work_dir)]
78
+
79
+
80
def restart_ui(history: List) -> Tuple[List, Dict, Dict, Dict, Dict]:
    """Append a restart notice and disable the input widgets while the
    backend restarts (they are re-enabled by later event handlers)."""
    history.append(["Welcome to the chatbot!", ""])
    locked_textbox = gr.Textbox.update(value="", interactive=False)
    return (
        history,
        locked_textbox,
        gr.Button.update(interactive=False),
        gr.Button.update(interactive=False),
        gr.Button.update(interactive=False),
    )
89
+
90
+
91
def restart_bot_backend(state_dict: Dict) -> None:
    """Reset the session backend: conversation, work dir and Jupyter kernel."""
    get_bot_backend(state_dict).restart()
94
+
95
+
96
def bot(state_dict: Dict, history: List) -> List:
    """Generator driving the chat loop: stream the model response, and keep
    looping while GPT requests function calls.

    Yields the chat history after every streamed chunk so gradio can render
    incrementally.
    """
    backend = get_bot_backend(state_dict)

    while backend.finish_reason in ('new_input', 'function_call'):
        # Start a fresh assistant bubble (or reuse the pending one).
        if history[-1][0] is None:
            history.append(['welcome', ""])
        else:
            history[-1][1] = ""

        for chunk in chat_completion(bot_backend=backend):
            history, should_exit = parse_response(
                chunk=chunk,
                history=history,
                bot_backend=backend
            )
            yield history
            if should_exit:
                # Unrecoverable parser/backend error: terminate the process.
                exit(-1)

    yield history
119
+
120
+
121
if __name__ == '__main__':
    config = get_config()

    # Page-wide CSS injected into the gradio app: white theme, sidebar/chat
    # layout, custom scrollbars, and a hidden gradio footer.
    custom_css = """
    .gradio-container {
        background-color: white ;
        margin: 0 !important ;
        padding: 0 !important ;
        space : 0
    }

    #mainDiv {
        width :100%;
        border : none;
        height: 100vh ;
    }

    #chatbot_div{
        padding : 10px
    }

    #chatbot {
        border-color: #a6a6a6 ;
        background-color: white ;
        border-radius: 5px;
    }

    #sidebar {
        background-color: #f2f2f2;
        padding : 5px ;
    }

    #files {

        height : 60% ;
        color : #f2f2f2 ;
        background-color : #f2f2f2 ;
        border : none
    }

    #gpt4_button {
        border-radius: 0px ;
        background-color : #0CAFFF ;
        padding : 10px ;
    }

    #gpt4_button:hover {
        background-color : #0ca6ff ;
    }

    #textbox {
        border-color : #a6a6a6 ;
        background-color : white ;
    }

    #textbox:hover {
        border-color : black ;
    }

    #upload_button {
        background-color : #b3b3b3;
    }

    #upload_button:hover {
        background-color : #8c8c8c ;
        box-shadow: 0 12px 16px 0 rgba(0,0,0,0.24),0 17px 50px 0 rgba(0,0,0,0.19);
    }

    /* width */
    ::-webkit-scrollbar {
        width: 4px;
    }

    /* Track */
    ::-webkit-scrollbar-track {
        background: #f1f1f1;
    }

    /* Handle */
    ::-webkit-scrollbar-thumb {
        background: #0CAFFF;
    }

    /* Handle on hover */
    ::-webkit-scrollbar-thumb:hover {
        background: #555;
    }

    footer{display:none !important}

    """

    # JS snippet handed to block.load via `_js`.
    # NOTE(review): gradio calls `_js` in place of/around the Python fn; the
    # returned browser height is not consumed anywhere visible here — confirm
    # whether it is still needed.
    javascript_code = """
    function get_browser_height() {
        return window.innerHeight;
    }
    """
    with gr.Blocks(theme=gr.themes.Base(), css=custom_css) as block:
        """
        Reference: https://www.gradio.app/guides/creating-a-chatbot-fast
        """
        # UI components
        # Per-session holder for the BotBackend; populated by initialization().
        state = gr.State(value={"bot_backend": None})
        with gr.Row(elem_id="mainDiv"):
            # Left sidebar: title, file listing, and the GPT-4 toggle.
            with gr.Column(elem_id="sidebar", scale=0.20):
                sidebar_header = gr.HTML("<h1 style='color:black; font-weight:bold; text-align:center; margin-top : 10px ;'>DMO-GPT-Interpreter</h1>")
                hr_linr = gr.HTML("<hr/>")
                file_section = gr.HTML("<h2 style='color:gray; font-weight:bold; text-align:start; margin-top : 5px ; font-size: 18px ; margin-bottom: -3px'>Files</h2>")
                file_output = gr.Files(elem_id="files", show_label=False)
                setting_section = gr.HTML("<h2 style='color:gray; font-weight:bold; text-align:start; margin-top : 5px ; font-size: 18px ; margin-bottom: -3px'>Settings</h2>")
                # Checkbox is only interactive when config marks GPT-4 available.
                check_box = gr.Checkbox(label="Use with GPT-4 ✨", interactive=config['model']['GPT-4']['available'], elem_id="gpt4_button", scale=2)
                check_box.change(fn=switch_to_gpt4, inputs=[state, check_box])
            # Main chat column: chatbot pane, textbox, upload button.
            with gr.Column(elem_id="chatbot_div"):
                chatbot = gr.Chatbot([], elem_id="chatbot", height=600, label="Welcome to DMO-GPT-Interpreter!")
                with gr.Row():
                    with gr.Column(scale=0.85):
                        text_box = gr.Textbox(
                            show_label=False,
                            placeholder="Enter text and press enter, or upload a file",
                            container=False,
                            elem_id='textbox'
                        )
                    with gr.Column(scale=0.15, min_width=0):
                        file_upload_button = gr.UploadButton("📁 Upload log files", file_types=['file'], elem_id="upload_button")

        # Components function binding
        # Submitting text: record the message, stream the bot reply, then
        # refresh the file list and unlock the textbox.
        txt_msg = text_box.submit(add_text, [state, chatbot, text_box], [chatbot, text_box], queue=False).then(
            bot, [state, chatbot], chatbot
        )
        txt_msg.then(fn=refresh_file_display, inputs=[state], outputs=[file_output])
        txt_msg.then(lambda: gr.update(interactive=True), None, [text_box], queue=False)
        txt_msg.then(lambda: gr.Button.update(interactive=False), None, queue=False)

        # Uploading a file: register it, let the bot react, refresh file list.
        file_msg = file_upload_button.upload(
            add_file, [state, chatbot, file_upload_button], [chatbot], queue=False
        ).then(
            bot, [state, chatbot], chatbot
        )
        file_msg.then(lambda: gr.Button.update(interactive=True), None, queue=False)
        file_msg.then(fn=refresh_file_display, inputs=[state], outputs=[file_output])

        # Create the per-session backend when the page loads.
        block.load(fn=initialization, inputs=[state], _js=javascript_code)

    block.queue()
    # NOTE(review): username/password come from the USERNAME/PASSWORD env
    # vars at import time; if either is unset, auth=(None, None) — confirm
    # the deployment always sets both.
    block.launch(inbrowser=True, auth=(username, password))
bot_backend.py ADDED
@@ -0,0 +1,232 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import openai
3
+ import os
4
+ import copy
5
+ import shutil
6
+ from jupyter_backend import *
7
+ from typing import *
8
+
9
# JSON schema of the single tool exposed to the model: `execute_code`
# runs Python in the session's Jupyter kernel and returns terminal text
# (images are reported as the literal '[image]').
functions = [
    {
        "name": "execute_code",
        "description": "This function allows you to execute Python code and retrieve the terminal output. If the code "
                       "generates image output, the function will return the text '[image]'. The code is sent to a "
                       "Jupyter kernel for execution. The kernel will remain active after execution, retaining all "
                       "variables in memory.",
        "parameters": {
            "type": "object",
            "properties": {
                "code": {
                    "type": "string",
                    "description": "The code text"
                }
            },
            "required": ["code"],
        }
    }
]

# System prompt inserted as the first conversation message (see
# BotBackend._init_conversation). The final Note matches the system message
# format produced by BotBackend.add_file_message.
system_msg = '''You are an AI code interpreter.
Your goal is to help users do a variety of jobs by executing Python code. Specially in data analysis.

You should:
1. Comprehend the user's requirements carefully & to the letter.
2. Give a brief description for what you plan to do & call the provided function to run code.
3. Provide results analysis based on the execution output.
4. If error occurred, try to fix it.

Note: If the user uploads a file, you will receive a system message "User uploaded a file: filename". Use the filename as the path in the code. '''

# Module-level configuration, loaded once at import time from the app's
# working directory.
with open('config.json') as f:
    config = json.load(f)

# Fall back to the OPENAI_API_KEY environment variable when config.json
# leaves the key empty.
if not config['API_KEY']:
    config['API_KEY'] = os.getenv('OPENAI_API_KEY')
    # NOTE(review): os.unsetenv does not update os.environ for the current
    # process — presumably meant to hide the key from child processes (app.py
    # separately deletes it from os.environ); confirm intent.
    os.unsetenv('OPENAI_API_KEY')
46
+
47
+
48
def get_config():
    """Return the module-level configuration dict loaded from config.json."""
    return config
50
+
51
+
52
def config_openai_api(api_type, api_base, api_version, api_key):
    """Point the global openai client at the configured endpoint/credentials."""
    settings = {
        'api_type': api_type,
        'api_base': api_base,
        'api_version': api_version,
        'api_key': api_key,
    }
    for attr, value in settings.items():
        setattr(openai, attr, value)
57
+
58
+
59
class GPTResponseLog:
    """Accumulator for the pieces of one streamed GPT response.

    Collects role, text content, function-call name/arguments, the rendered
    code block, the finish reason and a snapshot of the chat history while
    chunks stream in.

    Consistency fix: the defaults previously lived in two places
    (``__init__`` and ``reset_gpt_response_log_values``) and could drift;
    they are now defined once in ``_DEFAULTS``.  Unknown names in ``exclude``
    are skipped instead of raising KeyError.
    """

    # Single source of truth for the per-response fields and their defaults.
    _DEFAULTS = {
        'assistant_role_name': '',
        'content': '',
        'function_name': None,
        'function_args_str': '',
        'display_code_block': '',
        'finish_reason': 'stop',
        'bot_history': None,
    }

    def __init__(self):
        self.reset_gpt_response_log_values()

    def reset_gpt_response_log_values(self, exclude=None):
        """Reset every log field to its default, keeping names in ``exclude``."""
        excluded = set(exclude) if exclude else set()
        for attr_name, value in self._DEFAULTS.items():
            if attr_name not in excluded:
                setattr(self, attr_name, value)

    def set_assistant_role_name(self, assistant_role_name: str):
        """Record the role announced in the first streamed delta."""
        self.assistant_role_name = assistant_role_name

    def add_content(self, content: str):
        """Append a streamed text fragment to the response content."""
        self.content += content

    def set_function_name(self, function_name: str):
        """Record the name of the function GPT is calling."""
        self.function_name = function_name

    def copy_current_bot_history(self, bot_history: List):
        """Snapshot the chat history so streamed code can be re-rendered on it."""
        self.bot_history = copy.deepcopy(bot_history)

    def add_function_args_str(self, function_args_str: str):
        """Append a streamed fragment of the function-call arguments."""
        self.function_args_str += function_args_str

    def update_display_code_block(self, display_code_block):
        """Replace the markdown code block shown in the chat bubble."""
        self.display_code_block = display_code_block

    def update_finish_reason(self, finish_reason: str):
        """Record why the stream (or user input) ended."""
        self.finish_reason = finish_reason
106
+
107
+
108
class BotBackend(GPTResponseLog):
    """Per-session backend: conversation state, OpenAI kwargs and a Jupyter
    kernel with its own working directory.

    Fixes:
    - ``add_file_message`` previously put a literal placeholder in the system
      message instead of the uploaded file's name, so GPT never learned which
      file to open.
    - ``_init_api_config`` read the API key from the module-level ``config``
      while every other field came from ``self.config``.
    """

    def __init__(self):
        super().__init__()
        # Unique per-instance id used to namespace the kernel work dir and
        # temp image files (hash() of an int is the int itself).
        self.unique_id = hash(id(self))
        self.jupyter_work_dir = f'cache/work_dir_{self.unique_id}'
        self.jupyter_kernel = JupyterKernel(work_dir=self.jupyter_work_dir)
        self.gpt_model_choice = "GPT-3.5"
        self.revocable_files = []
        self._init_conversation()
        self._init_api_config()
        self._init_kwargs_for_chat_completion()

    def _init_conversation(self):
        """(Re)start the conversation with only the system prompt.

        The existing list object is cleared rather than replaced so that
        ``kwargs_for_chat_completion['messages']`` stays aliased to it.
        """
        first_system_msg = {'role': 'system', 'content': system_msg}
        if hasattr(self, 'conversation'):
            self.conversation.clear()
            self.conversation.append(first_system_msg)
        else:
            self.conversation: List[Dict] = [first_system_msg]

    def _init_api_config(self):
        """Push endpoint/credential settings from config.json into openai."""
        self.config = get_config()
        api_type = self.config['API_TYPE']
        api_base = self.config['API_base']
        api_version = self.config['API_VERSION']
        api_key = self.config['API_KEY']  # consistency: read via self.config
        config_openai_api(api_type, api_base, api_version, api_key)

    def _init_kwargs_for_chat_completion(self):
        """Build the kwargs for openai.ChatCompletion.create for the current
        model choice (Azure uses `engine`, OpenAI uses `model`)."""
        self.kwargs_for_chat_completion = {
            'stream': True,
            'messages': self.conversation,
            'functions': functions,
            'function_call': 'auto'
        }

        model_name = self.config['model'][self.gpt_model_choice]['model_name']

        if self.config['API_TYPE'] == 'azure':
            self.kwargs_for_chat_completion['engine'] = model_name
        else:
            self.kwargs_for_chat_completion['model'] = model_name

    def _clear_all_files_in_work_dir(self):
        """Delete every file in the kernel work dir (used on restart)."""
        for filename in os.listdir(self.jupyter_work_dir):
            os.remove(
                os.path.join(self.jupyter_work_dir, filename)
            )

    def add_gpt_response_content_message(self):
        """Append the accumulated assistant text to the conversation."""
        self.conversation.append(
            {'role': self.assistant_role_name, 'content': self.content}
        )

    def add_text_message(self, user_text):
        """Append a user message; new text makes prior uploads non-revocable."""
        self.conversation.append(
            {'role': 'user', 'content': user_text}
        )
        self.revocable_files.clear()
        self.update_finish_reason(finish_reason='new_input')

    def add_file_message(self, path, bot_msg):
        """Copy an uploaded file into the work dir and tell GPT about it.

        ``bot_msg`` is the chat-history entry for this upload; it is kept so
        ``revoke_file`` can remove the matching bubble.
        """
        filename = os.path.basename(path)
        work_dir = self.jupyter_work_dir

        shutil.copy(path, work_dir)

        # Bug fix: include the actual filename (matches the format promised
        # in system_msg) instead of a placeholder.
        gpt_msg = {'role': 'system', 'content': f'User uploaded a file: {filename}'}
        self.conversation.append(gpt_msg)
        self.revocable_files.append(
            {
                'bot_msg': bot_msg,
                'gpt_msg': gpt_msg,
                'path': os.path.join(work_dir, filename)
            }
        )

    def add_function_call_response_message(self, function_response: str, save_tokens=True):
        """Record the assistant's function call and its execution result.

        When ``save_tokens`` is set, long outputs are truncated to the first
        and last 200 characters to keep the context small.
        """
        self.conversation.append(
            {
                "role": self.assistant_role_name,
                "name": self.function_name,
                "content": self.function_args_str
            }
        )

        if save_tokens and len(function_response) > 500:
            function_response = f'{function_response[:200]}\n[Output too much, the middle part output is omitted]\n ' \
                                f'End part of output:\n{function_response[-200:]}'
        self.conversation.append(
            {
                "role": "function",
                "name": self.function_name,
                "content": function_response,
            }
        )

    def revoke_file(self):
        """Undo the most recent upload: drop its conversation entry and file.

        Returns the chat-history entry to remove, or None when there is
        nothing to revoke.
        """
        if self.revocable_files:
            file = self.revocable_files[-1]
            bot_msg = file['bot_msg']
            gpt_msg = file['gpt_msg']
            path = file['path']

            # Uploads are only revocable while they are the newest message.
            assert self.conversation[-1] is gpt_msg
            del self.conversation[-1]

            os.remove(path)

            del self.revocable_files[-1]

            return bot_msg
        else:
            return None

    def update_gpt_model_choice(self, model_choice):
        """Switch models and rebuild the ChatCompletion kwargs."""
        self.gpt_model_choice = model_choice
        self._init_kwargs_for_chat_completion()

    def restart(self):
        """Reset files, conversation, response log and the Jupyter kernel."""
        self._clear_all_files_in_work_dir()
        self.revocable_files.clear()
        self._init_conversation()
        self.reset_gpt_response_log_values()
        self.jupyter_kernel.restart_jupyter_kernel()
config.json ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "API_TYPE": "open_ai",
3
+ "API_base": "https://api.openai.com/v1",
4
+ "API_VERSION": null,
5
+ "API_KEY": "",
6
+ "model": {
7
+ "GPT-3.5": {
8
+ "model_name": "gpt-3.5-turbo-0613",
9
+ "available": true
10
+ },
11
+ "GPT-4": {
12
+ "model_name": "gpt-4-0613",
13
+ "available": true
14
+ }
15
+ }
16
+ }
17
+
functional.py ADDED
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from bot_backend import *
2
+ import base64
3
+ import time
4
+
5
+
6
def chat_completion(bot_backend: BotBackend):
    """Start a streaming ChatCompletion request for the backend's current
    model, after checking the chosen model is enabled in the config."""
    model_choice = bot_backend.gpt_model_choice
    backend_config = bot_backend.config

    assert backend_config['model'][model_choice]['available'], \
        f"{model_choice} is not available for your API key"

    return openai.ChatCompletion.create(**bot_backend.kwargs_for_chat_completion)
15
+
16
+
17
def add_function_response_to_bot_history(content_to_display, history, unique_id):
    """Render executed-code output into the chat history.

    Text marks are joined into one fenced ``shell`` block (prefixed ❌ when an
    error traceback was present, ✔️ otherwise); image marks are decoded from
    base64, saved under ``cache/temp_<unique_id>/`` and appended as <img> tags.
    """
    text_marks = ('stdout', 'execute_result_text', 'display_text')
    image_marks = ('execute_result_png', 'execute_result_jpeg', 'display_png', 'display_jpeg')

    text_parts = []
    image_records = []
    error_occurred = False

    for mark, out_str in content_to_display:
        if mark in text_marks:
            text_parts.append(out_str)
        elif mark in image_marks:
            image_records.append(('png' if 'png' in mark else 'jpg', out_str))
        elif mark == 'error':
            # Strip ANSI colour codes from the traceback before display.
            text_parts.append(delete_color_control_char(out_str))
            error_occurred = True

    terminal_text = '\n'.join(text_parts).strip('\n')
    if error_occurred:
        history.append([None, f'❌Terminal output:\n```shell\n\n{terminal_text}\n```'])
    else:
        history.append([None, f'✔️Terminal output:\n```shell\n{terminal_text}\n```'])

    # Decode and persist each image, then embed it in the chat.
    temp_path = f'cache/temp_{unique_id}'
    for filetype, encoded in image_records:
        if not os.path.exists(temp_path):
            os.mkdir(temp_path)
        path = f'{temp_path}/{hash(time.time())}.{filetype}'
        with open(path, 'wb') as f:
            f.write(base64.b64decode(encoded))
        history.append(
            [
                None,
                f"<img src=\"file={path}\" style='width: 600px; max-width:none; max-height:none'>"
            ]
        )
54
+
55
+
56
def parse_json(function_args: str, finished: bool):
    """
    GPT may generate non-standard JSON format string, which contains '\n' in string value, leading to error when using
    `json.loads()`.
    Here we implement a parser to extract code directly from non-standard JSON string.

    :param function_args: the (possibly partial) function-call arguments streamed so far
    :param finished: True when the stream ended and the arguments are complete
    :return: code string if successfully parsed otherwise None
    """
    # State machine flags; code_begin_index/code_end_index delimit the code
    # value inside function_args. ('met_end_code_"' is declared but unused.)
    parser_log = {
        'met_begin_{': False,
        'begin_"code"': False,
        'end_"code"': False,
        'met_:': False,
        'met_end_}': False,
        'met_end_code_"': False,
        "code_begin_index": 0,
        "code_end_index": 0
    }
    try:
        # Forward scan: find the opening quote of the "code" value.
        for index, char in enumerate(function_args):
            if char == '{':
                parser_log['met_begin_{'] = True
            elif parser_log['met_begin_{'] and char == '"':
                if parser_log['met_:']:
                    # This quote opens the code value itself.
                    if finished:
                        parser_log['code_begin_index'] = index + 1
                        break
                    else:
                        # Streaming case: value may be incomplete.
                        if index + 1 == len(function_args):
                            return ''
                        else:
                            temp_code_str = function_args[index + 1:]
                            if '\n' in temp_code_str:
                                # Raw newlines mean non-standard JSON: take the
                                # tail directly as code.
                                return temp_code_str.strip('\n')
                            else:
                                # Close the value/object and parse normally.
                                return json.loads(function_args + '"}')['code']
                elif parser_log['begin_"code"']:
                    # Closing quote of the "code" key.
                    parser_log['end_"code"'] = True
                else:
                    # Opening quote of the "code" key.
                    parser_log['begin_"code"'] = True
            elif parser_log['end_"code"'] and char == ':':
                parser_log['met_:'] = True
            else:
                continue
        if finished:
            # Backward scan: find the closing quote before the final '}'.
            for index, char in enumerate(function_args[::-1]):
                back_index = -1 - index
                if char == '}':
                    parser_log['met_end_}'] = True
                elif parser_log['met_end_}'] and char == '"':
                    parser_log['code_end_index'] = back_index - 1
                    break
                else:
                    continue
            code_str = function_args[parser_log['code_begin_index']: parser_log['code_end_index'] + 1]
            if '\n' in code_str:
                # Non-standard JSON: use the extracted span directly.
                return code_str.strip('\n')
            else:
                # Standard JSON: defer to json.loads for correct unescaping.
                return json.loads(function_args)['code']

    except Exception as e:
        # Any parse failure is reported as None; the caller decides how to react.
        return None
jupyter_backend.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import jupyter_client
2
+ import re
3
+
4
+
5
def delete_color_control_char(string):
    """Strip ANSI escape sequences (colour/control codes) from *string*."""
    ansi_escape_pattern = re.compile(r'(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]')
    cleaned = ansi_escape_pattern.sub('', string)
    return cleaned
8
+
9
+
10
class JupyterKernel:
    """Thin wrapper around a python3 Jupyter kernel scoped to one work dir.

    Bug fix: the original IOPub loop only fetched the next message inside the
    innermost matched branch, so a ``stream`` message whose name was not
    ``stdout`` (e.g. stderr), or an ``execute_result``/``display_data``/
    ``error`` message without the expected key, re-processed the same message
    forever.  The rewritten loop always fetches the next message, and the
    duplicated MIME extraction is factored into ``_collect_data_output``.
    """

    def __init__(self, work_dir):
        self.kernel_manager, self.kernel_client = jupyter_client.manager.start_new_kernel(kernel_name='python3')
        self.work_dir = work_dir
        self._create_work_dir()
        # 'python' aliases 'execute_code' to absorb GPT's hallucinated calls.
        self.available_functions = {
            'execute_code': self.execute_code,
            'python': self.execute_code
        }

    @staticmethod
    def _collect_data_output(content, prefix, all_output):
        """Append (``<prefix>_<kind>``, payload) for each known MIME type in a
        message's ``data`` dict, preserving text/html/png/jpeg order."""
        data = content.get('data', {})
        for mime, kind in (('text/plain', 'text'), ('text/html', 'html'),
                           ('image/png', 'png'), ('image/jpeg', 'jpeg')):
            if mime in data:
                all_output.append((f'{prefix}_{kind}', data[mime]))

    def execute_code_(self, code):
        """Run *code* in the kernel; return a list of (mark, payload) tuples
        drained from the IOPub channel until the kernel goes idle."""
        self.kernel_client.execute(code)

        all_output = []
        while True:
            iopub_msg = self.kernel_client.get_iopub_msg()
            msg_type = iopub_msg['msg_type']
            content = iopub_msg['content']

            if msg_type == 'status' and content.get('execution_state') == 'idle':
                break  # execution finished
            elif msg_type == 'stream':
                if content.get('name') == 'stdout':
                    all_output.append(('stdout', content['text']))
            elif msg_type == 'execute_result':
                self._collect_data_output(content, 'execute_result', all_output)
            elif msg_type == 'display_data':
                self._collect_data_output(content, 'display', all_output)
            elif msg_type == 'error':
                if 'traceback' in content:
                    all_output.append(('error', '\n'.join(content['traceback'])))

        return all_output

    def execute_code(self, code):
        """Execute *code*; return (text summary for GPT, raw display content).

        Images are reported to GPT as the literal '[image]'; error tracebacks
        are stripped of ANSI colour codes.
        """
        text_to_gpt = []
        content_to_display = self.execute_code_(code)
        for mark, out_str in content_to_display:
            if mark in ('stdout', 'execute_result_text', 'display_text'):
                text_to_gpt.append(out_str)
            elif mark in ('execute_result_png', 'execute_result_jpeg', 'display_png', 'display_jpeg'):
                text_to_gpt.append('[image]')
            elif mark == 'error':
                text_to_gpt.append(delete_color_control_char(out_str))

        return '\n'.join(text_to_gpt), content_to_display

    def _create_work_dir(self):
        # Create the work dir and chdir into it *inside* the kernel process.
        init_code = f"import os\n" \
                    f"if not os.path.exists('{self.work_dir}'):\n" \
                    f"    os.mkdir('{self.work_dir}')\n" \
                    f"os.chdir('{self.work_dir}')\n" \
                    f"del os"
        self.execute_code_(init_code)

    def restart_jupyter_kernel(self):
        """Shut down the current kernel, start a fresh one, re-enter work dir."""
        self.kernel_client.shutdown()
        self.kernel_manager, self.kernel_client = jupyter_client.manager.start_new_kernel(kernel_name='python3')
        self._create_work_dir()
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ notebook==6.5.4
2
+ openai==0.27.8
3
+ gradio==3.39.0
response_parser.py ADDED
@@ -0,0 +1,200 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from abc import ABCMeta, abstractmethod
2
+ from functional import *
3
+
4
+
5
class ChoiceStrategy(metaclass=ABCMeta):
    """Strategy interface for handling one field of a streamed chat chunk.

    Each subclass inspects ``choice['delta']`` — the incremental payload of a
    streaming ChatCompletion chunk — and updates the backend and chat history
    when the delta carries the field it is responsible for.
    """

    def __init__(self, choice):
        self.choice = choice
        self.delta = choice['delta']  # incremental fields of this chunk

    @abstractmethod
    def support(self):
        """Return True when this strategy can process the current delta."""
        pass

    @abstractmethod
    def execute(self, bot_backend: BotBackend, history: List, whether_exit: bool):
        """Apply the delta; return the updated ``(history, whether_exit)``."""
        pass
17
+
18
+
19
class RoleChoiceStrategy(ChoiceStrategy):
    """Records the assistant role name announced at the start of a stream."""

    def support(self):
        return 'role' in self.delta

    def execute(self, bot_backend: BotBackend, history: List, whether_exit: bool):
        role = self.delta['role']
        bot_backend.set_assistant_role_name(assistant_role_name=role)
        return history, whether_exit
27
+
28
+
29
class ContentChoiceStrategy(ChoiceStrategy):
    """Streams plain-text assistant content into the newest chat bubble."""

    def support(self):
        # `content` is present but null inside function-call chunks, e.g.
        # {"role": "assistant", "content": null,
        #  "function_call": {"name": "python", "arguments": ""}}
        # so a real (non-None) value is required.
        return self.delta.get('content') is not None

    def execute(self, bot_backend: BotBackend, history: List, whether_exit: bool):
        fragment = self.delta.get('content', '')
        bot_backend.add_content(content=fragment)
        # Re-render the accumulated text into the current bubble.
        history[-1][1] = bot_backend.content
        return history, whether_exit
46
+
47
+
48
class NameFunctionCallChoiceStrategy(ChoiceStrategy):
    """Handles the delta that names the function GPT wants to call."""

    def support(self):
        return 'function_call' in self.delta and 'name' in self.delta['function_call']

    def execute(self, bot_backend: BotBackend, history: List, whether_exit: bool):
        requested_name = self.delta['function_call']['name']
        bot_backend.set_function_name(function_name=requested_name)
        # Snapshot history: streamed code is re-rendered on this snapshot.
        bot_backend.copy_current_bot_history(bot_history=history)

        known_functions = bot_backend.jupyter_kernel.available_functions
        if bot_backend.function_name not in known_functions:
            history.append(
                [
                    None,
                    f'GPT attempted to call a function that does not exist: {bot_backend.function_name}\n '
                ]
            )
            whether_exit = True

        return history, whether_exit
67
+
68
+
69
class ArgumentsFunctionCallChoiceStrategy(ChoiceStrategy):
    """Accumulates streamed function-call arguments and live-renders the code
    being written into the chat bubble."""

    def support(self):
        return 'function_call' in self.delta and 'arguments' in self.delta['function_call']

    def execute(self, bot_backend: BotBackend, history: List, whether_exit: bool):
        bot_backend.add_function_args_str(function_args_str=self.delta['function_call']['arguments'])

        if bot_backend.function_name == 'python':  # handle hallucinatory function calls
            """
            In practice, we have noticed that GPT, especially GPT-3.5, may occasionally produce hallucinatory
            function calls. These calls involve a non-existent function named `python` with arguments consisting
            solely of raw code text (not a JSON format).
            """
            # Raw code text: display the accumulated arguments verbatim.
            temp_code_str = bot_backend.function_args_str
            bot_backend.update_display_code_block(
                display_code_block="\n🔴Working:\n```python\n{}\n```".format(temp_code_str)
            )
            # Re-render onto the snapshot so partial args never accumulate.
            history = copy.deepcopy(bot_backend.bot_history)
            history[-1][1] += bot_backend.display_code_block
        else:
            # JSON-wrapped code: extract what has streamed so far (may be None
            # while the prefix is still unparseable — then keep the old render).
            temp_code_str = parse_json(function_args=bot_backend.function_args_str, finished=False)
            if temp_code_str is not None:
                bot_backend.update_display_code_block(
                    display_code_block="\n🔴Working:\n```python\n{}\n```".format(
                        temp_code_str
                    )
                )
                history = copy.deepcopy(bot_backend.bot_history)
                history[-1][1] += bot_backend.display_code_block

        return history, whether_exit
101
+
102
+
103
class FinishReasonChoiceStrategy(ChoiceStrategy):
    """Handles the final chunk of a stream (``finish_reason`` set): commits
    the assistant message and, for function calls, executes the code and
    appends its output to the conversation and chat history.

    Bug fix: ``get_code_str`` raised the bare ``json.JSONDecodeError`` class;
    its constructor requires (msg, doc, pos), so Python raised ``TypeError``
    instead, which skipped the intended ``except json.JSONDecodeError``
    handler and surfaced as a generic backend error.
    """

    def support(self):
        return self.choice['finish_reason'] is not None

    def execute(self, bot_backend: BotBackend, history: List, whether_exit: bool):
        function_dict = bot_backend.jupyter_kernel.available_functions

        if bot_backend.content:
            bot_backend.add_gpt_response_content_message()

        bot_backend.update_finish_reason(finish_reason=self.choice['finish_reason'])
        if bot_backend.finish_reason == 'function_call':
            try:
                code_str = self.get_code_str(bot_backend)

                # Switch the bubble from 🔴 (streaming) to 🟢 (executing).
                bot_backend.update_display_code_block(
                    display_code_block="\n🟢Working:\n```python\n{}\n```".format(code_str)
                )
                history = copy.deepcopy(bot_backend.bot_history)
                history[-1][1] += bot_backend.display_code_block

                # function response: run the code in the Jupyter kernel.
                text_to_gpt, content_to_display = function_dict[
                    bot_backend.function_name
                ](code_str)

                # add function call to conversation (truncated when long)
                bot_backend.add_function_call_response_message(function_response=text_to_gpt, save_tokens=True)

                add_function_response_to_bot_history(
                    content_to_display=content_to_display, history=history, unique_id=bot_backend.unique_id
                )

            except json.JSONDecodeError:
                history.append(
                    [None, f"GPT generate wrong function args: {bot_backend.function_args_str}"]
                )
                whether_exit = True
                return history, whether_exit

            except Exception as e:
                history.append([None, f'Backend error: {e}'])
                whether_exit = True
                return history, whether_exit

        # Keep finish_reason so the outer loop in app.bot() can decide
        # whether to request another completion.
        bot_backend.reset_gpt_response_log_values(exclude=['finish_reason'])

        return history, whether_exit

    @staticmethod
    def get_code_str(bot_backend):
        """Extract the code text from the accumulated function arguments.

        :raises json.JSONDecodeError: when the arguments cannot be parsed.
        """
        if bot_backend.function_name == 'python':
            # Hallucinated `python` call: arguments are already raw code.
            code_str = bot_backend.function_args_str
        else:
            code_str = parse_json(function_args=bot_backend.function_args_str, finished=True)
            if code_str is None:
                # Construct the exception properly (msg, doc, pos) so the
                # `except json.JSONDecodeError` branch in execute() catches it.
                raise json.JSONDecodeError(
                    'cannot extract code from function arguments',
                    bot_backend.function_args_str, 0
                )
        return code_str
162
+
163
+
164
class ChoiceHandler:
    """Runs every applicable ChoiceStrategy, in order, over one streamed
    choice so a single chunk can update role, content and function-call
    state in one pass."""

    strategies = [
        RoleChoiceStrategy, ContentChoiceStrategy, NameFunctionCallChoiceStrategy,
        ArgumentsFunctionCallChoiceStrategy, FinishReasonChoiceStrategy
    ]

    def __init__(self, choice):
        self.choice = choice

    def handle(self, bot_backend: BotBackend, history: List, whether_exit: bool):
        """Apply each supporting strategy; return (history, whether_exit)."""
        for strategy_cls in self.strategies:
            strategy = strategy_cls(choice=self.choice)
            if strategy.support():
                history, whether_exit = strategy.execute(
                    bot_backend=bot_backend,
                    history=history,
                    whether_exit=whether_exit
                )
        return history, whether_exit
184
+
185
+
186
+ def parse_response(chunk, history, bot_backend: BotBackend):
187
+ """
188
+ :return: history, whether_exit
189
+ """
190
+ whether_exit = False
191
+ if chunk['choices']:
192
+ choice = chunk['choices'][0]
193
+ choice_handler = ChoiceHandler(choice=choice)
194
+ history, whether_exit = choice_handler.handle(
195
+ history=history,
196
+ bot_backend=bot_backend,
197
+ whether_exit=whether_exit
198
+ )
199
+
200
+ return history, whether_exit