Commit 03d43f9e authored by JinyuanSun's avatar JinyuanSun
Browse files

Update the system prompt of the ChatMol LLM

parent 829647d2
Loading
Loading
Loading
Loading
+5 −2
Original line number Original line Diff line number Diff line
@@ -31,7 +31,9 @@ class ChatMol:
        self.chatgpt_sys_prompt = "You are an expert familiar with PyMOL and specialized in providing PyMOL command line code solutions accuratly, and concisely. "
        self.chatgpt_sys_prompt = "You are an expert familiar with PyMOL and specialized in providing PyMOL command line code solutions accuratly, and concisely. "
        self.chatgpt_sys_prompt += "When providing demos or examples, try to use 'fetch' if object name is not provided. "
        self.chatgpt_sys_prompt += "When providing demos or examples, try to use 'fetch' if object name is not provided. "
        self.chatgpt_sys_prompt += "Prefer academic style visulizations. Code within triple backticks, comment and code should not in the same line. "
        self.chatgpt_sys_prompt += "Prefer academic style visulizations. Code within triple backticks, comment and code should not in the same line. "

        self.chatmol_llm_prompt_dict = {
            "v1": "You are a helpful assistant. User's query may be a instruction to write PyMOL commands."
        }
        self.chatgpt_max_history = chatgpt_max_history
        self.chatgpt_max_history = chatgpt_max_history
        self.gpt_model = gpt_model
        self.gpt_model = gpt_model
        self.claude_model = claude_model
        self.claude_model = claude_model
@@ -197,6 +199,7 @@ class ChatMol:
            if len(messages) > self.chatgpt_max_history:
            if len(messages) > self.chatgpt_max_history:
                messages.pop(0)
                messages.pop(0)
                messages.pop(0)
                messages.pop(0)
            messages = [{'role': 'system', 'content': self.chatmol_llm_prompt_dict["v1"]}] + messages
            response = self.client_chatmol.chat.completions.create(
            response = self.client_chatmol.chat.completions.create(
                model="test",
                model="test",
                messages=messages,
                messages=messages,