Commit 9bf3bfb8 authored by JinyuanSun's avatar JinyuanSun
Browse files

support the new model

parent 2d0107a4
Loading
Loading
Loading
Loading
+11 −4
Original line number Original line Diff line number Diff line
@@ -6,13 +6,14 @@ st.sidebar.markdown("Welcome to ChatMol! ChatMol is a tool that allows you to in


openai_llms = ['gpt-4o', 'gpt-4-turbo', 'gpt-3.5-turbo']
openai_llms = ['gpt-4o', 'gpt-4-turbo', 'gpt-3.5-turbo']
claude_llms = ['claude-3-5-sonnet-20240620', 'claude-3-sonnet-20240229', 'claude-3-haiku-20240307', 'claude-3-opus-20240229']
claude_llms = ['claude-3-5-sonnet-20240620', 'claude-3-sonnet-20240229', 'claude-3-haiku-20240307', 'claude-3-opus-20240229']
chatmol_llms = ['chatlite']
chatmol_llms = ['chatlite', "chatmol-llm-0.1"]


introduction_of_models = {
introduction_of_models = {
    'gpt-4o': "GPT-4o (“o” for “omni”) is most advanced model of OpenAI. It has the same high intelligence as GPT-4 Turbo but is much more efficient—it generates text 2x faster and is 50% cheaper.",
    'gpt-4o': "GPT-4o (“o” for “omni”) is most advanced model of OpenAI. It has the same high intelligence as GPT-4 Turbo but is much more efficient—it generates text 2x faster and is 50% cheaper.",
    'gpt-4-turbo': "GPT-4 can solve difficult problems with greater accuracy than any of previous models of OpenAI, thanks to its broader general knowledge and advanced reasoning capabilities.",
    'gpt-4-turbo': "GPT-4 can solve difficult problems with greater accuracy than any of previous models of OpenAI, thanks to its broader general knowledge and advanced reasoning capabilities.",
    'gpt-3.5-turbo': "GPT-3.5 Turbo models can understand and generate natural language or code and have been optimized for chat.",
    'gpt-3.5-turbo': "GPT-3.5 Turbo models can understand and generate natural language or code and have been optimized for chat.",
    'chatlite': "A model provided by ChatMol freely available to all, which is optimized for PyMOL commands generation but not good for general chat.",
    'chatlite': "A service provided by ChatMol freely available to all, which is optimized for PyMOL commands generation but not good for general chat.",
    'chatmol-llm-0.1': "A model provided by ChatMol freely available to all, which is optimized for PyMOL commands generation but still can be used for general chat.",
    'claude-3-5-sonnet-20240620': "Most intelligent model of Anthropic, combining top-tier performance with improved speed. Currently the only model in the Claude 3.5 family.\n - Advanced research and analysis\n - Complex problem-solving\n - Sophisticated language understanding and generation\n - High-level strategic planning",
    'claude-3-5-sonnet-20240620': "Most intelligent model of Anthropic, combining top-tier performance with improved speed. Currently the only model in the Claude 3.5 family.\n - Advanced research and analysis\n - Complex problem-solving\n - Sophisticated language understanding and generation\n - High-level strategic planning",
    'claude-3-sonnet-20240229': "Balances intelligence and speed for high-throughput tasks.\n - Data processing over vast amounts of knowledge\n - Sales forecasting and targeted marketing\n - Code generation and quality control",
    'claude-3-sonnet-20240229': "Balances intelligence and speed for high-throughput tasks.\n - Data processing over vast amounts of knowledge\n - Sales forecasting and targeted marketing\n - Code generation and quality control",
    'claude-3-haiku-20240307': "Near-instant responsiveness that can mimic human interactions.\n - Live support chat\n - Translations\n - Content moderation\n - Extracting knowledge from unstructured data",
    'claude-3-haiku-20240307': "Near-instant responsiveness that can mimic human interactions.\n - Live support chat\n - Translations\n - Content moderation\n - Extracting knowledge from unstructured data",
@@ -69,14 +70,20 @@ if prompt := st.chat_input("What is up?"):
            elif st.session_state["llm"] in claude_llms:
            elif st.session_state["llm"] in claude_llms:
                response = st.session_state["cm"].chat_with_claude(f"This is the log: \n\n{st.session_state['ps'].pymol_console}\n\n. This is my question: \n\n{prompt}")
                response = st.session_state["cm"].chat_with_claude(f"This is the log: \n\n{st.session_state['ps'].pymol_console}\n\n. This is my question: \n\n{prompt}")
            elif st.session_state["llm"] in chatmol_llms:
            elif st.session_state["llm"] in chatmol_llms:
                response = st.session_state["cm"].chatlite(f"Instruction: {prompt}")
                if st.session_state["llm"] == "chatlite":
                    response = st.session_state["ps"].chatlite(f"Instruction: {prompt}")
                else:
                    response = st.session_state["ps"].chatmol(f"{prompt}")
        else:
        else:
            if st.session_state["llm"] in openai_llms:
            if st.session_state["llm"] in openai_llms:
                response = st.session_state["ps"].chatgpt(f"This is the log: \n\n{st.session_state['ps'].pymol_console}\n\n. This is my instruction: \n\n{prompt}")
                response = st.session_state["ps"].chatgpt(f"This is the log: \n\n{st.session_state['ps'].pymol_console}\n\n. This is my instruction: \n\n{prompt}")
            elif st.session_state["llm"] in claude_llms:
            elif st.session_state["llm"] in claude_llms:
                response = st.session_state["ps"].claude(f"This is the log: \n\n{st.session_state['ps'].pymol_console}\n\n. This is my instruction: \n\n{prompt}")
                response = st.session_state["ps"].claude(f"This is the log: \n\n{st.session_state['ps'].pymol_console}\n\n. This is my instruction: \n\n{prompt}")
            elif st.session_state["llm"] in chatmol_llms:
            elif st.session_state["llm"] in chatmol_llms:
                if st.session_state["llm"] == "chatlite":
                    response = st.session_state["ps"].chatlite(f"Instruction: {prompt}")
                    response = st.session_state["ps"].chatlite(f"Instruction: {prompt}")
                else:
                    response = st.session_state["ps"].chatmol(f"{prompt}")
    
    
        st.session_state.messages.append({"role": "assistant", "content": response})
        st.session_state.messages.append({"role": "assistant", "content": response})
    with st.chat_message("assistant"):
    with st.chat_message("assistant"):
+6 −1
Original line number Original line Diff line number Diff line
@@ -12,6 +12,9 @@ def chat_with_gpt(message):
def chat_with_claude(message):
def chat_with_claude(message):
    return defaul_client.chat_with_claude(message)
    return defaul_client.chat_with_claude(message)


def chat_with_chatmol_llm(message):
    """Forward *message* to the default client's ChatMol LLM chat endpoint."""
    reply = defaul_client.chat_with_chatmol_llm(message)
    return reply

def clear_stashed_commands():
def clear_stashed_commands():
    return defaul_client.clear_stashed_commands()
    return defaul_client.clear_stashed_commands()


@@ -25,3 +28,5 @@ def start_pymol_gui():


def warnings():
def warnings():
    return defaul_client.warnings
    return defaul_client.warnings

__version__ = "0.3.0"
 No newline at end of file
+44 −1
Original line number Original line Diff line number Diff line
@@ -151,3 +151,46 @@ class PymolServer():
        except Exception as e:
        except Exception as e:
            print(f"Error during command execution: {e}")
            print(f"Error during command execution: {e}")
        return response
        return response
    
    def chatmol(self, message, execute: bool = True):
        """Chat with the ChatMol LLM and optionally run the PyMOL commands it returns.

        Special messages:
          - "e" / "execute": run all stashed commands, then clear the stash.
          - "new": reset the conversation history and the command stash.
          - a message ending in "?": commands are stashed instead of executed.

        Args:
            message: User instruction or question for the model.
            execute: When True (default), run extracted commands immediately;
                when False, stash them for a later "execute".

        Returns:
            The raw model response string, or 0 for the special messages.
        """
        message = message.strip()
        if message == "e" or message == 'execute':
            if len(self.cm.stashed_commands) == 0:
                print("There is no stashed commands")
            else:
                for command in self.cm.stashed_commands:
                    self.server.do(command)
                self.cm.clear_stashed_commands()
            return 0

        if message == "new":
            self.cm.clear_chat_history()
            self.cm.clear_stashed_commands()
            return 0

        if message.endswith('?'):
            execute = False

        response = self.cm.chat_with_chatmol_llm(message)
        print("ChatMol:", response)
        try:
            # Fenced code blocks are the odd-indexed segments when the
            # response is split on ``` markers.
            command_blocks = []
            self.cm.clear_stashed_commands()
            for i, block in enumerate(response.split("```")):
                if i % 2 == 1:
                    command_blocks.append(block)
            for command_block in command_blocks:
                for command in command_block.split("\n"):
                    if command.strip() and not command.strip().startswith("#"):
                        if command.strip() == "pymol" or command.strip() == "bash":
                            continue  # skip fence language tags, not real commands
                        if "#" in command:
                            # Split only on the first '#': a command containing
                            # several '#' would otherwise raise ValueError on
                            # tuple unpacking.
                            command, comment = command.split("#", 1)
                        if execute:
                            print(command)
                            self.server.do(command)
                        else:
                            self.cm.stashed_commands.append(command)
        except Exception as e:
            print(f"Error during command execution: {e}")
        return response
+31 −0
Original line number Original line Diff line number Diff line
@@ -27,6 +27,7 @@ class ChatMol:
        self.lite_conversation_history = ""
        self.lite_conversation_history = ""
        self.chatgpt_conversation_history = []
        self.chatgpt_conversation_history = []
        self.claude_conversation_messages = []
        self.claude_conversation_messages = []
        self.chatmol_llm_conversation_history = []
        self.chatgpt_sys_prompt = "You are an expert familiar with PyMOL and specialized in providing PyMOL command line code solutions accuratly, and concisely. "
        self.chatgpt_sys_prompt = "You are an expert familiar with PyMOL and specialized in providing PyMOL command line code solutions accuratly, and concisely. "
        self.chatgpt_sys_prompt += "When providing demos or examples, try to use 'fetch' if object name is not provided. "
        self.chatgpt_sys_prompt += "When providing demos or examples, try to use 'fetch' if object name is not provided. "
        self.chatgpt_sys_prompt += "Prefer academic style visulizations. Code within triple backticks, comment and code should not in the same line."
        self.chatgpt_sys_prompt += "Prefer academic style visulizations. Code within triple backticks, comment and code should not in the same line."
@@ -99,6 +100,11 @@ class ChatMol:




    def init_clients(self):
    def init_clients(self):
        self.client_chatmol = OpenAI(
            api_key="0",
            base_url="https://u48777-be32-7f3f0ef6.westb.seetacloud.com:8443/v1"
        )

        if os.environ.get("ANTHROPIC_API_KEY"):
        if os.environ.get("ANTHROPIC_API_KEY"):
            self.client_anthropic = anthropic.Anthropic(api_key=os.environ.get("ANTHROPIC_API_KEY"))
            self.client_anthropic = anthropic.Anthropic(api_key=os.environ.get("ANTHROPIC_API_KEY"))
        elif api_key := self.load_api_key("anthropic") != "":
        elif api_key := self.load_api_key("anthropic") != "":
@@ -182,6 +188,31 @@ class ChatMol:
            print(f"Error: {e}")
            print(f"Error: {e}")
            return ""
            return ""
        
        
    def chat_with_chatmol_llm(self, message):
        """Send *message* to the ChatMol LLM endpoint and return its reply.

        The user message is appended to the conversation history; only the
        most recent ``chatgpt_max_history`` entries are sent to the model.
        The assistant's reply is appended to the history on success.

        Args:
            message: The user's message text.

        Returns:
            The assistant's reply string, or "" if the request failed.
        """
        self.chatmol_llm_conversation_history.append(
            {"role": "user", "content": message}
        )
        try:
            # Copy the trailing window of the history with a slice instead of
            # a loop whose variable shadowed the `message` parameter.
            messages = list(
                self.chatmol_llm_conversation_history[-self.chatgpt_max_history:]
            )
            response = self.client_chatmol.chat.completions.create(
                model="test",
                messages=messages,
                max_tokens=self.chatgpt_max_tokens,
                n=1,
                temperature=0,
            )
            answer = response.choices[0].message.content.strip()

            self.chatmol_llm_conversation_history.append(
                {"role": "assistant", "content": answer}
            )
            return answer
        except Exception as e:
            print(f"Error: {e}")
            return ""

    def chat_with_claude(self, message):
    def chat_with_claude(self, message):
        try:
        try:
            self.claude_conversation_messages.append({"role": "user", "content": message})
            self.claude_conversation_messages.append({"role": "user", "content": message})
+1 −1
Original line number Original line Diff line number Diff line
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages


setup(
setup(
    name='chatmol',
    name='chatmol',
    version='0.2.3',
    version='0.3.0',
    packages=find_packages(),
    packages=find_packages(),
    install_requires=[
    install_requires=[
        'requests', 
        'requests',